From 975f66f2eebe9dadba04f275774d4ab83f74cf25 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 13 Apr 2024 14:04:41 +0200 Subject: Adding upstream version 7.7.0+dfsg. Signed-off-by: Daniel Baumann --- ansible_collections/azure/azcollection/.gitignore | 9 + .../azure/azcollection/CHANGELOG.md | 729 ++ .../azure/azcollection/CONTRIBUTING.md | 68 + .../azure/azcollection/CredScanSuppressions.json | 17 + ansible_collections/azure/azcollection/FILES.json | 7215 ++++++++++++++++++++ ansible_collections/azure/azcollection/LICENSE | 674 ++ .../azure/azcollection/MANIFEST.json | 38 + ansible_collections/azure/azcollection/README.md | 97 + ansible_collections/azure/azcollection/ansible.cfg | 2 + .../azure/azcollection/azure-pipelines.yml | 45 + .../azcollection/meta/execution-environment.yml | 3 + .../azure/azcollection/meta/runtime.yml | 282 + .../azcollection/plugins/doc_fragments/azure.py | 143 + .../azcollection/plugins/doc_fragments/azure_rm.py | 95 + .../plugins/doc_fragments/azure_tags.py | 31 + .../azcollection/plugins/inventory/azure_rm.py | 656 ++ .../plugins/lookup/azure_keyvault_secret.py | 212 + .../plugins/module_utils/azure_rm_common.py | 1845 +++++ .../plugins/module_utils/azure_rm_common_ext.py | 215 + .../plugins/module_utils/azure_rm_common_rest.py | 104 + .../plugins/modules/azure_rm_account_info.py | 224 + .../plugins/modules/azure_rm_adapplication.py | 671 ++ .../plugins/modules/azure_rm_adapplication_info.py | 177 + .../plugins/modules/azure_rm_adgroup.py | 443 ++ .../plugins/modules/azure_rm_adgroup_info.py | 323 + .../plugins/modules/azure_rm_adpassword.py | 288 + .../plugins/modules/azure_rm_adpassword_info.py | 209 + .../plugins/modules/azure_rm_adserviceprincipal.py | 215 + .../modules/azure_rm_adserviceprincipal_info.py | 144 + .../plugins/modules/azure_rm_aduser.py | 413 ++ .../plugins/modules/azure_rm_aduser_info.py | 252 + .../azcollection/plugins/modules/azure_rm_aks.py | 1173 ++++ .../plugins/modules/azure_rm_aks_info.py | 188 + 
.../plugins/modules/azure_rm_aksagentpool.py | 526 ++ .../plugins/modules/azure_rm_aksagentpool_info.py | 335 + .../modules/azure_rm_aksagentpoolversion_info.py | 116 + .../plugins/modules/azure_rm_aksupgrade_info.py | 229 + .../plugins/modules/azure_rm_aksversion_info.py | 130 + .../plugins/modules/azure_rm_apimanagement.py | 670 ++ .../plugins/modules/azure_rm_apimanagement_info.py | 275 + .../modules/azure_rm_apimanagementservice.py | 345 + .../modules/azure_rm_apimanagementservice_info.py | 284 + .../plugins/modules/azure_rm_appgateway.py | 2431 +++++++ .../plugins/modules/azure_rm_appgateway_info.py | 233 + .../modules/azure_rm_applicationsecuritygroup.py | 245 + .../azure_rm_applicationsecuritygroup_info.py | 226 + .../plugins/modules/azure_rm_appserviceplan.py | 371 + .../modules/azure_rm_appserviceplan_info.py | 238 + .../plugins/modules/azure_rm_automationaccount.py | 174 + .../modules/azure_rm_automationaccount_info.py | 384 ++ .../plugins/modules/azure_rm_automationrunbook.py | 445 ++ .../modules/azure_rm_automationrunbook_info.py | 289 + .../plugins/modules/azure_rm_autoscale.py | 644 ++ .../plugins/modules/azure_rm_autoscale_info.py | 271 + .../plugins/modules/azure_rm_availabilityset.py | 384 ++ .../modules/azure_rm_availabilityset_info.py | 220 + .../plugins/modules/azure_rm_azurefirewall.py | 721 ++ .../plugins/modules/azure_rm_azurefirewall_info.py | 270 + .../plugins/modules/azure_rm_backupazurevm.py | 389 ++ .../plugins/modules/azure_rm_backupazurevm_info.py | 173 + .../plugins/modules/azure_rm_backuppolicy.py | 459 ++ .../plugins/modules/azure_rm_backuppolicy_info.py | 177 + .../plugins/modules/azure_rm_bastionhost.py | 549 ++ .../plugins/modules/azure_rm_bastionhost_info.py | 335 + .../plugins/modules/azure_rm_batchaccount.py | 337 + .../plugins/modules/azure_rm_cdnendpoint.py | 664 ++ .../plugins/modules/azure_rm_cdnendpoint_info.py | 321 + .../plugins/modules/azure_rm_cdnprofile.py | 299 + .../plugins/modules/azure_rm_cdnprofile_info.py | 266 
+ .../plugins/modules/azure_rm_cognitivesearch.py | 487 ++ .../modules/azure_rm_cognitivesearch_info.py | 316 + .../plugins/modules/azure_rm_containerinstance.py | 839 +++ .../modules/azure_rm_containerinstance_info.py | 359 + .../plugins/modules/azure_rm_containerregistry.py | 407 ++ .../modules/azure_rm_containerregistry_info.py | 277 + .../azure_rm_containerregistryreplication.py | 292 + .../azure_rm_containerregistryreplication_info.py | 179 + .../modules/azure_rm_containerregistrytag.py | 349 + .../modules/azure_rm_containerregistrytag_info.py | 236 + .../modules/azure_rm_containerregistrywebhook.py | 330 + .../azure_rm_containerregistrywebhook_info.py | 178 + .../plugins/modules/azure_rm_cosmosdbaccount.py | 647 ++ .../modules/azure_rm_cosmosdbaccount_info.py | 556 ++ .../plugins/modules/azure_rm_datafactory.py | 429 ++ .../plugins/modules/azure_rm_datafactory_info.py | 300 + .../plugins/modules/azure_rm_datalakestore.py | 809 +++ .../plugins/modules/azure_rm_datalakestore_info.py | 467 ++ .../plugins/modules/azure_rm_ddosprotectionplan.py | 241 + .../modules/azure_rm_ddosprotectionplan_info.py | 163 + .../plugins/modules/azure_rm_deployment.py | 715 ++ .../plugins/modules/azure_rm_deployment_info.py | 245 + .../plugins/modules/azure_rm_devtestlab.py | 280 + .../plugins/modules/azure_rm_devtestlab_info.py | 272 + .../modules/azure_rm_devtestlabarmtemplate_info.py | 223 + .../modules/azure_rm_devtestlabartifact_info.py | 246 + .../modules/azure_rm_devtestlabartifactsource.py | 362 + .../azure_rm_devtestlabartifactsource_info.py | 256 + .../modules/azure_rm_devtestlabcustomimage.py | 379 + .../modules/azure_rm_devtestlabcustomimage_info.py | 228 + .../modules/azure_rm_devtestlabenvironment.py | 384 ++ .../modules/azure_rm_devtestlabenvironment_info.py | 246 + .../plugins/modules/azure_rm_devtestlabpolicy.py | 397 ++ .../modules/azure_rm_devtestlabpolicy_info.py | 243 + .../plugins/modules/azure_rm_devtestlabschedule.py | 337 + 
.../modules/azure_rm_devtestlabschedule_info.py | 222 + .../modules/azure_rm_devtestlabvirtualmachine.py | 540 ++ .../azure_rm_devtestlabvirtualmachine_info.py | 329 + .../modules/azure_rm_devtestlabvirtualnetwork.py | 289 + .../azure_rm_devtestlabvirtualnetwork_info.py | 217 + .../plugins/modules/azure_rm_diskencryptionset.py | 318 + .../modules/azure_rm_diskencryptionset_info.py | 208 + .../plugins/modules/azure_rm_dnsrecordset.py | 553 ++ .../plugins/modules/azure_rm_dnsrecordset_info.py | 305 + .../plugins/modules/azure_rm_dnszone.py | 297 + .../plugins/modules/azure_rm_dnszone_info.py | 254 + .../plugins/modules/azure_rm_eventhub.py | 438 ++ .../plugins/modules/azure_rm_eventhub_info.py | 241 + .../plugins/modules/azure_rm_expressroute.py | 373 + .../plugins/modules/azure_rm_expressroute_info.py | 212 + .../plugins/modules/azure_rm_firewallpolicy.py | 465 ++ .../modules/azure_rm_firewallpolicy_info.py | 238 + .../plugins/modules/azure_rm_functionapp.py | 402 ++ .../plugins/modules/azure_rm_functionapp_info.py | 201 + .../plugins/modules/azure_rm_gallery.py | 303 + .../plugins/modules/azure_rm_gallery_info.py | 258 + .../plugins/modules/azure_rm_galleryimage.py | 552 ++ .../plugins/modules/azure_rm_galleryimage_info.py | 269 + .../modules/azure_rm_galleryimageversion.py | 637 ++ .../modules/azure_rm_galleryimageversion_info.py | 284 + .../plugins/modules/azure_rm_hdinsightcluster.py | 552 ++ .../modules/azure_rm_hdinsightcluster_info.py | 322 + .../plugins/modules/azure_rm_hostgroup.py | 290 + .../plugins/modules/azure_rm_hostgroup_info.py | 196 + .../azcollection/plugins/modules/azure_rm_image.py | 388 ++ .../plugins/modules/azure_rm_image_info.py | 328 + .../plugins/modules/azure_rm_iotdevice.py | 463 ++ .../plugins/modules/azure_rm_iotdevice_info.py | 308 + .../plugins/modules/azure_rm_iotdevicemodule.py | 369 + .../plugins/modules/azure_rm_iothub.py | 892 +++ .../plugins/modules/azure_rm_iothub_info.py | 614 ++ .../modules/azure_rm_iothubconsumergroup.py | 
163 + .../plugins/modules/azure_rm_ipgroup.py | 313 + .../plugins/modules/azure_rm_ipgroup_info.py | 201 + .../plugins/modules/azure_rm_keyvault.py | 541 ++ .../plugins/modules/azure_rm_keyvault_info.py | 349 + .../plugins/modules/azure_rm_keyvaultkey.py | 370 + .../plugins/modules/azure_rm_keyvaultkey_info.py | 477 ++ .../plugins/modules/azure_rm_keyvaultsecret.py | 271 + .../modules/azure_rm_keyvaultsecret_info.py | 444 ++ .../plugins/modules/azure_rm_loadbalancer.py | 1075 +++ .../plugins/modules/azure_rm_loadbalancer_info.py | 219 + .../azcollection/plugins/modules/azure_rm_lock.py | 223 + .../plugins/modules/azure_rm_lock_info.py | 222 + .../modules/azure_rm_loganalyticsworkspace.py | 337 + .../modules/azure_rm_loganalyticsworkspace_info.py | 266 + .../plugins/modules/azure_rm_manageddisk.py | 656 ++ .../plugins/modules/azure_rm_manageddisk_info.py | 268 + .../plugins/modules/azure_rm_managementgroup.py | 415 ++ .../modules/azure_rm_managementgroup_info.py | 327 + .../modules/azure_rm_mariadbconfiguration.py | 239 + .../modules/azure_rm_mariadbconfiguration_info.py | 206 + .../plugins/modules/azure_rm_mariadbdatabase.py | 294 + .../modules/azure_rm_mariadbdatabase_info.py | 204 + .../modules/azure_rm_mariadbfirewallrule.py | 273 + .../modules/azure_rm_mariadbfirewallrule_info.py | 197 + .../plugins/modules/azure_rm_mariadbserver.py | 383 ++ .../plugins/modules/azure_rm_mariadbserver_info.py | 261 + .../modules/azure_rm_monitordiagnosticsetting.py | 640 ++ .../azure_rm_monitordiagnosticsetting_info.py | 366 + .../plugins/modules/azure_rm_monitorlogprofile.py | 388 ++ .../modules/azure_rm_multiplemanageddisks.py | 737 ++ .../plugins/modules/azure_rm_mysqlconfiguration.py | 233 + .../modules/azure_rm_mysqlconfiguration_info.py | 204 + .../plugins/modules/azure_rm_mysqldatabase.py | 292 + .../plugins/modules/azure_rm_mysqldatabase_info.py | 203 + .../plugins/modules/azure_rm_mysqlfirewallrule.py | 273 + .../modules/azure_rm_mysqlfirewallrule_info.py | 195 + 
.../plugins/modules/azure_rm_mysqlserver.py | 470 ++ .../plugins/modules/azure_rm_mysqlserver_info.py | 283 + .../plugins/modules/azure_rm_natgateway.py | 414 ++ .../plugins/modules/azure_rm_natgateway_info.py | 219 + .../plugins/modules/azure_rm_networkinterface.py | 946 +++ .../modules/azure_rm_networkinterface_info.py | 358 + .../plugins/modules/azure_rm_notificationhub.py | 390 ++ .../modules/azure_rm_notificationhub_info.py | 239 + .../modules/azure_rm_openshiftmanagedcluster.py | 855 +++ .../azure_rm_openshiftmanagedcluster_info.py | 383 ++ .../modules/azure_rm_postgresqlconfiguration.py | 234 + .../azure_rm_postgresqlconfiguration_info.py | 207 + .../plugins/modules/azure_rm_postgresqldatabase.py | 294 + .../modules/azure_rm_postgresqldatabase_info.py | 202 + .../modules/azure_rm_postgresqlfirewallrule.py | 271 + .../azure_rm_postgresqlfirewallrule_info.py | 195 + .../plugins/modules/azure_rm_postgresqlserver.py | 458 ++ .../modules/azure_rm_postgresqlserver_info.py | 276 + .../modules/azure_rm_privatednsrecordset.py | 494 ++ .../modules/azure_rm_privatednsrecordset_info.py | 256 + .../plugins/modules/azure_rm_privatednszone.py | 225 + .../modules/azure_rm_privatednszone_info.py | 244 + .../plugins/modules/azure_rm_privatednszonelink.py | 324 + .../modules/azure_rm_privatednszonelink_info.py | 195 + .../plugins/modules/azure_rm_privateendpoint.py | 341 + .../modules/azure_rm_privateendpoint_info.py | 303 + .../modules/azure_rm_privateendpointconnection.py | 348 + .../azure_rm_privateendpointconnection_info.py | 234 + .../azure_rm_privateendpointdnszonegroup.py | 367 + .../azure_rm_privateendpointdnszonegroup_info.py | 251 + .../plugins/modules/azure_rm_privatelinkservice.py | 596 ++ .../modules/azure_rm_privatelinkservice_info.py | 345 + .../modules/azure_rm_proximityplacementgroup.py | 251 + .../azure_rm_proximityplacementgroup_info.py | 208 + .../plugins/modules/azure_rm_publicipaddress.py | 445 ++ .../modules/azure_rm_publicipaddress_info.py | 327 + 
.../modules/azure_rm_recoveryservicesvault.py | 310 + .../modules/azure_rm_recoveryservicesvault_info.py | 205 + .../plugins/modules/azure_rm_rediscache.py | 853 +++ .../plugins/modules/azure_rm_rediscache_info.py | 376 + .../modules/azure_rm_rediscachefirewallrule.py | 322 + .../modules/azure_rm_registrationassignment.py | 273 + .../azure_rm_registrationassignment_info.py | 189 + .../modules/azure_rm_registrationdefinition.py | 445 ++ .../azure_rm_registrationdefinition_info.py | 243 + .../plugins/modules/azure_rm_resource.py | 420 ++ .../plugins/modules/azure_rm_resource_info.py | 443 ++ .../plugins/modules/azure_rm_resourcegroup.py | 286 + .../plugins/modules/azure_rm_resourcegroup_info.py | 235 + .../plugins/modules/azure_rm_roleassignment.py | 373 + .../modules/azure_rm_roleassignment_info.py | 335 + .../plugins/modules/azure_rm_roledefinition.py | 409 ++ .../modules/azure_rm_roledefinition_info.py | 312 + .../azcollection/plugins/modules/azure_rm_route.py | 215 + .../plugins/modules/azure_rm_route_info.py | 212 + .../plugins/modules/azure_rm_routetable.py | 196 + .../plugins/modules/azure_rm_routetable_info.py | 217 + .../plugins/modules/azure_rm_securitygroup.py | 834 +++ .../plugins/modules/azure_rm_securitygroup_info.py | 395 ++ .../plugins/modules/azure_rm_servicebus.py | 219 + .../plugins/modules/azure_rm_servicebus_info.py | 582 ++ .../plugins/modules/azure_rm_servicebusqueue.py | 347 + .../modules/azure_rm_servicebussaspolicy.py | 325 + .../plugins/modules/azure_rm_servicebustopic.py | 306 + .../azure_rm_servicebustopicsubscription.py | 314 + .../plugins/modules/azure_rm_snapshot.py | 397 ++ .../plugins/modules/azure_rm_sqldatabase.py | 596 ++ .../plugins/modules/azure_rm_sqldatabase_info.py | 291 + .../plugins/modules/azure_rm_sqlelasticpool.py | 558 ++ .../modules/azure_rm_sqlelasticpool_info.py | 292 + .../plugins/modules/azure_rm_sqlfirewallrule.py | 268 + .../modules/azure_rm_sqlfirewallrule_info.py | 207 + 
.../plugins/modules/azure_rm_sqlmanagedinstance.py | 800 +++ .../modules/azure_rm_sqlmanagedinstance_info.py | 468 ++ .../plugins/modules/azure_rm_sqlserver.py | 443 ++ .../plugins/modules/azure_rm_sqlserver_info.py | 291 + .../plugins/modules/azure_rm_storageaccount.py | 1288 ++++ .../modules/azure_rm_storageaccount_info.py | 831 +++ .../plugins/modules/azure_rm_storageblob.py | 645 ++ .../plugins/modules/azure_rm_storageshare.py | 356 + .../plugins/modules/azure_rm_storageshare_info.py | 281 + .../plugins/modules/azure_rm_subnet.py | 693 ++ .../plugins/modules/azure_rm_subnet_info.py | 299 + .../plugins/modules/azure_rm_subscription_info.py | 223 + .../plugins/modules/azure_rm_trafficmanager.py | 576 ++ .../modules/azure_rm_trafficmanagerendpoint.py | 371 + .../azure_rm_trafficmanagerendpoint_info.py | 307 + .../modules/azure_rm_trafficmanagerprofile.py | 460 ++ .../modules/azure_rm_trafficmanagerprofile_info.py | 421 ++ .../plugins/modules/azure_rm_virtualhub.py | 767 +++ .../plugins/modules/azure_rm_virtualhub_info.py | 620 ++ .../modules/azure_rm_virtualhubconnection.py | 503 ++ .../modules/azure_rm_virtualhubconnection_info.py | 270 + .../plugins/modules/azure_rm_virtualmachine.py | 2544 +++++++ .../modules/azure_rm_virtualmachine_info.py | 543 ++ .../modules/azure_rm_virtualmachineextension.py | 358 + .../azure_rm_virtualmachineextension_info.py | 249 + .../modules/azure_rm_virtualmachineimage_info.py | 256 + .../modules/azure_rm_virtualmachinescaleset.py | 1489 ++++ .../azure_rm_virtualmachinescaleset_info.py | 449 ++ .../azure_rm_virtualmachinescalesetextension.py | 296 + ...zure_rm_virtualmachinescalesetextension_info.py | 223 + .../azure_rm_virtualmachinescalesetinstance.py | 319 + ...azure_rm_virtualmachinescalesetinstance_info.py | 243 + .../modules/azure_rm_virtualmachinesize_info.py | 158 + .../plugins/modules/azure_rm_virtualnetwork.py | 409 ++ .../modules/azure_rm_virtualnetwork_info.py | 354 + .../modules/azure_rm_virtualnetworkgateway.py | 408 ++ 
.../modules/azure_rm_virtualnetworkpeering.py | 467 ++ .../modules/azure_rm_virtualnetworkpeering_info.py | 260 + .../plugins/modules/azure_rm_virtualwan.py | 399 ++ .../plugins/modules/azure_rm_virtualwan_info.py | 239 + .../plugins/modules/azure_rm_vmbackuppolicy.py | 458 ++ .../modules/azure_rm_vmbackuppolicy_info.py | 257 + .../modules/azure_rm_vmssnetworkinterface_info.py | 399 ++ .../plugins/modules/azure_rm_vpnsite.py | 606 ++ .../plugins/modules/azure_rm_vpnsite_info.py | 263 + .../plugins/modules/azure_rm_vpnsitelink_info.py | 205 + .../plugins/modules/azure_rm_webapp.py | 1089 +++ .../plugins/modules/azure_rm_webapp_info.py | 516 ++ .../modules/azure_rm_webappaccessrestriction.py | 388 ++ .../azure_rm_webappaccessrestriction_info.py | 207 + .../plugins/modules/azure_rm_webappslot.py | 1063 +++ .../modules/azure_rm_webappvnetconnection.py | 259 + .../modules/azure_rm_webappvnetconnection_info.py | 164 + .../azure/azcollection/pr-pipelines.yml | 264 + .../azure/azcollection/release-pipelines.yml | 29 + .../azure/azcollection/requirements-azure.txt | 52 + .../azcollection/sanity-requirements-azure.txt | 5 + .../azure/azcollection/shippable.yml | 40 + .../azure/azcollection/tests/config.yml | 2 + .../targets/azure_rm_account_info/aliases | 1 + .../targets/azure_rm_account_info/meta/main.yml | 2 + .../targets/azure_rm_account_info/tasks/main.yml | 17 + .../tests/integration/targets/azure_rm_acs/aliases | 4 + .../integration/targets/azure_rm_acs/meta/main.yml | 2 + .../targets/azure_rm_acs/tasks/main.yml | 149 + .../targets/azure_rm_adapplication/aliases | 4 + .../targets/azure_rm_adapplication/meta/main.yml | 2 + .../targets/azure_rm_adapplication/tasks/main.yml | 91 + .../integration/targets/azure_rm_adgroup/aliases | 4 + .../targets/azure_rm_adgroup/meta/main.yml | 2 + .../targets/azure_rm_adgroup/tasks/main.yml | 239 + .../targets/azure_rm_adpassword/aliases | 4 + .../targets/azure_rm_adpassword/meta/main.yml | 2 + 
.../targets/azure_rm_adpassword/tasks/main.yml | 103 + .../targets/azure_rm_adserviceprincipal/aliases | 4 + .../azure_rm_adserviceprincipal/meta/main.yml | 2 + .../azure_rm_adserviceprincipal/tasks/main.yml | 72 + .../integration/targets/azure_rm_aduser/aliases | 3 + .../targets/azure_rm_aduser/meta/main.yml | 2 + .../targets/azure_rm_aduser/tasks/main.yml | 165 + .../tests/integration/targets/azure_rm_aks/aliases | 3 + .../integration/targets/azure_rm_aks/meta/main.yml | 2 + .../targets/azure_rm_aks/tasks/main.yml | 572 ++ .../targets/azure_rm_aks/tasks/minimal-cluster.yml | 136 + .../targets/azure_rm_aksagentpool/aliases | 3 + .../targets/azure_rm_aksagentpool/meta/main.yml | 2 + .../targets/azure_rm_aksagentpool/tasks/main.yml | 172 + .../targets/azure_rm_apimanagement/aliases | 4 + .../targets/azure_rm_apimanagement/meta/main.yml | 2 + .../targets/azure_rm_apimanagement/tasks/main.yml | 139 + .../targets/azure_rm_apimanagementservice/aliases | 4 + .../azure_rm_apimanagementservice/meta/main.yml | 2 + .../azure_rm_apimanagementservice/tasks/main.yml | 58 + .../targets/azure_rm_appgateway/aliases | 4 + .../targets/azure_rm_appgateway/files/cert1.txt | 1 + .../targets/azure_rm_appgateway/files/cert2.txt | 1 + .../targets/azure_rm_appgateway/files/cert3b64.txt | 1 + .../targets/azure_rm_appgateway/meta/main.yml | 2 + .../targets/azure_rm_appgateway/tasks/main.yml | 2628 +++++++ .../targets/azure_rm_appserviceplan/aliases | 4 + .../targets/azure_rm_appserviceplan/meta/main.yml | 2 + .../targets/azure_rm_appserviceplan/tasks/main.yml | 116 + .../targets/azure_rm_automationaccount/aliases | 4 + .../azure_rm_automationaccount/meta/main.yml | 2 + .../azure_rm_automationaccount/tasks/main.yml | 88 + .../targets/azure_rm_automationrunbook/aliases | 5 + .../azure_rm_automationrunbook/meta/main.yml | 2 + .../azure_rm_automationrunbook/tasks/main.yml | 139 + .../integration/targets/azure_rm_autoscale/aliases | 4 + .../targets/azure_rm_autoscale/meta/main.yml | 2 + 
.../targets/azure_rm_autoscale/tasks/main.yml | 221 + .../targets/azure_rm_availabilityset/aliases | 4 + .../targets/azure_rm_availabilityset/meta/main.yml | 2 + .../azure_rm_availabilityset/tasks/main.yml | 193 + .../targets/azure_rm_azurefirewall/aliases | 3 + .../targets/azure_rm_azurefirewall/meta/main.yml | 2 + .../targets/azure_rm_azurefirewall/tasks/main.yml | 277 + .../targets/azure_rm_backupazurevm/aliases | 4 + .../targets/azure_rm_backupazurevm/meta/main.yml | 2 + .../targets/azure_rm_backupazurevm/tasks/main.yml | 76 + .../targets/azure_rm_backuppolicy/aliases | 3 + .../targets/azure_rm_backuppolicy/meta/main.yml | 2 + .../targets/azure_rm_backuppolicy/tasks/main.yml | 168 + .../targets/azure_rm_bastionhost/aliases | 3 + .../targets/azure_rm_bastionhost/meta/main.yml | 2 + .../targets/azure_rm_bastionhost/tasks/main.yml | 173 + .../targets/azure_rm_batchaccount/aliases | 3 + .../targets/azure_rm_batchaccount/meta/main.yml | 2 + .../targets/azure_rm_batchaccount/tasks/main.yml | 76 + .../targets/azure_rm_cdnprofile/aliases | 5 + .../targets/azure_rm_cdnprofile/meta/main.yml | 2 + .../targets/azure_rm_cdnprofile/tasks/main.yml | 276 + .../targets/azure_rm_cognitivesearch/aliases | 3 + .../targets/azure_rm_cognitivesearch/meta/main.yml | 2 + .../azure_rm_cognitivesearch/tasks/main.yml | 194 + .../targets/azure_rm_containerinstance/aliases | 4 + .../azure_rm_containerinstance/meta/main.yml | 2 + .../azure_rm_containerinstance/tasks/main.yml | 356 + .../targets/azure_rm_containerregistry/aliases | 4 + .../azure_rm_containerregistry/meta/main.yml | 2 + .../azure_rm_containerregistry/tasks/main.yml | 116 + .../targets/azure_rm_containerregistrytag/aliases | 3 + .../azure_rm_containerregistrytag/meta/main.yml | 2 + .../azure_rm_containerregistrytag/tasks/main.yml | 366 + .../targets/azure_rm_cosmosdbaccount/aliases | 4 + .../targets/azure_rm_cosmosdbaccount/meta/main.yml | 2 + .../azure_rm_cosmosdbaccount/tasks/main.yml | 365 + 
.../targets/azure_rm_datafactory/aliases | 3 + .../targets/azure_rm_datafactory/meta/main.yml | 2 + .../targets/azure_rm_datafactory/tasks/main.yml | 76 + .../targets/azure_rm_datalakestore/aliases | 3 + .../targets/azure_rm_datalakestore/meta/main.yml | 2 + .../targets/azure_rm_datalakestore/tasks/main.yml | 203 + .../targets/azure_rm_ddosprotectionplan/aliases | 3 + .../azure_rm_ddosprotectionplan/meta/main.yml | 2 + .../azure_rm_ddosprotectionplan/tasks/main.yml | 82 + .../targets/azure_rm_deployment/aliases | 4 + .../targets/azure_rm_deployment/meta/main.yml | 2 + .../targets/azure_rm_deployment/tasks/main.yml | 70 + .../targets/azure_rm_devtestlab/aliases | 17 + .../targets/azure_rm_devtestlab/meta/main.yml | 2 + .../targets/azure_rm_devtestlab/tasks/main.yml | 218 + .../targets/azure_rm_diskencryptionset/aliases | 3 + .../azure_service_principal_attribute.py | 94 + .../azure_rm_diskencryptionset/meta/main.yml | 2 + .../azure_rm_diskencryptionset/tasks/main.yml | 138 + .../targets/azure_rm_dnsrecordset/aliases | 3 + .../targets/azure_rm_dnsrecordset/meta/main.yml | 2 + .../targets/azure_rm_dnsrecordset/tasks/main.yml | 207 + .../integration/targets/azure_rm_dnszone/aliases | 6 + .../targets/azure_rm_dnszone/meta/main.yml | 2 + .../targets/azure_rm_dnszone/tasks/main.yml | 300 + .../integration/targets/azure_rm_eventhub/aliases | 3 + .../targets/azure_rm_eventhub/meta/main.yml | 2 + .../targets/azure_rm_eventhub/tasks/main.yml | 145 + .../targets/azure_rm_expressroute/aliases | 3 + .../targets/azure_rm_expressroute/meta/main.yml | 2 + .../targets/azure_rm_expressroute/tasks/main.yml | 119 + .../targets/azure_rm_firewallpolicy/aliases | 3 + .../targets/azure_rm_firewallpolicy/meta/main.yml | 2 + .../targets/azure_rm_firewallpolicy/tasks/main.yml | 100 + .../targets/azure_rm_functionapp/aliases | 3 + .../targets/azure_rm_functionapp/meta/main.yml | 2 + .../targets/azure_rm_functionapp/tasks/main.yml | 131 + .../integration/targets/azure_rm_gallery/aliases | 6 + 
.../targets/azure_rm_gallery/meta/main.yml | 2 + .../targets/azure_rm_gallery/tasks/main.yml | 370 + .../targets/azure_rm_hdinsightcluster/aliases | 6 + .../azure_rm_hdinsightcluster/meta/main.yml | 2 + .../azure_rm_hdinsightcluster/tasks/main.yml | 244 + .../integration/targets/azure_rm_hostgroup/aliases | 3 + .../targets/azure_rm_hostgroup/meta/main.yml | 2 + .../targets/azure_rm_hostgroup/tasks/main.yml | 87 + .../integration/targets/azure_rm_image/aliases | 4 + .../targets/azure_rm_image/meta/main.yml | 2 + .../targets/azure_rm_image/tasks/main.yml | 178 + .../integration/targets/azure_rm_iothub/aliases | 3 + .../targets/azure_rm_iothub/meta/main.yml | 2 + .../targets/azure_rm_iothub/tasks/main.yml | 178 + .../integration/targets/azure_rm_ipgroup/aliases | 3 + .../targets/azure_rm_ipgroup/meta/main.yml | 2 + .../targets/azure_rm_ipgroup/tasks/main.yml | 106 + .../integration/targets/azure_rm_keyvault/aliases | 5 + .../azure_service_principal_attribute.py | 94 + .../targets/azure_rm_keyvault/meta/main.yml | 2 + .../targets/azure_rm_keyvault/tasks/main.yml | 277 + .../targets/azure_rm_keyvaultkey/aliases | 3 + .../azure_service_principal_attribute.py | 94 + .../targets/azure_rm_keyvaultkey/meta/main.yml | 2 + .../targets/azure_rm_keyvaultkey/tasks/main.yml | 186 + .../targets/azure_rm_keyvaultsecret/aliases | 3 + .../azure_service_principal_attribute.py | 94 + .../targets/azure_rm_keyvaultsecret/meta/main.yml | 2 + .../targets/azure_rm_keyvaultsecret/tasks/main.yml | 96 + .../targets/azure_rm_loadbalancer/aliases | 3 + .../targets/azure_rm_loadbalancer/meta/main.yml | 2 + .../targets/azure_rm_loadbalancer/tasks/main.yml | 333 + .../targets/azure_rm_loganalyticsworkspace/aliases | 5 + .../azure_rm_loganalyticsworkspace/meta/main.yml | 2 + .../azure_rm_loganalyticsworkspace/tasks/main.yml | 183 + .../targets/azure_rm_manageddisk/aliases | 4 + .../targets/azure_rm_manageddisk/meta/main.yml | 2 + .../targets/azure_rm_manageddisk/tasks/main.yml | 260 + 
.../targets/azure_rm_managementgroup/aliases | 3 + .../targets/azure_rm_managementgroup/meta/main.yml | 2 + .../azure_rm_managementgroup/tasks/main.yml | 35 + .../targets/azure_rm_mariadbserver/aliases | 8 + .../targets/azure_rm_mariadbserver/meta/main.yml | 2 + .../targets/azure_rm_mariadbserver/tasks/main.yml | 640 ++ .../azure_rm_monitordiagnosticsetting/aliases | 3 + .../meta/main.yml | 2 + .../tasks/main.yml | 504 ++ .../targets/azure_rm_monitorlogprofile/aliases | 3 + .../azure_rm_monitorlogprofile/meta/main.yml | 2 + .../azure_rm_monitorlogprofile/tasks/main.yml | 133 + .../targets/azure_rm_multiplemanageddisks/aliases | 5 + .../defaults/main.yml | 30 + .../azure_rm_multiplemanageddisks/meta/main.yml | 2 + .../azure_rm_multiplemanageddisks/tasks/main.yml | 2 + .../tasks/test_async.yml | 160 + .../tasks/test_shared.yml | 341 + .../templates/disk_config.j2 | 11 + .../targets/azure_rm_mysqlserver/aliases | 10 + .../targets/azure_rm_mysqlserver/meta/main.yml | 2 + .../targets/azure_rm_mysqlserver/tasks/main.yml | 675 ++ .../targets/azure_rm_natgateway/aliases | 4 + .../targets/azure_rm_natgateway/meta/main.yml | 2 + .../targets/azure_rm_natgateway/tasks/main.yml | 346 + .../targets/azure_rm_networkinterface/aliases | 4 + .../azure_rm_networkinterface/meta/main.yml | 2 + .../azure_rm_networkinterface/tasks/main.yml | 765 +++ .../targets/azure_rm_notificationhub/aliases | 3 + .../targets/azure_rm_notificationhub/meta/main.yml | 2 + .../azure_rm_notificationhub/tasks/main.yml | 153 + .../azure_rm_openshiftmanagedcluster/aliases | 4 + .../azure_rm_openshiftmanagedcluster/meta/main.yml | 2 + .../tasks/main.yml | 118 + .../targets/azure_rm_postgresqlserver/aliases | 11 + .../azure_rm_postgresqlserver/meta/main.yml | 2 + .../azure_rm_postgresqlserver/tasks/main.yml | 615 ++ .../targets/azure_rm_privatednsrecordset/aliases | 3 + .../azure_rm_privatednsrecordset/meta/main.yml | 2 + .../azure_rm_privatednsrecordset/tasks/main.yml | 259 + 
.../targets/azure_rm_privatednszone/aliases | 4 + .../targets/azure_rm_privatednszone/meta/main.yml | 2 + .../targets/azure_rm_privatednszone/tasks/main.yml | 77 + .../targets/azure_rm_privatednszonelink/aliases | 3 + .../azure_rm_privatednszonelink/meta/main.yml | 2 + .../azure_rm_privatednszonelink/tasks/main.yml | 126 + .../targets/azure_rm_privateendpoint/aliases | 3 + .../targets/azure_rm_privateendpoint/meta/main.yml | 2 + .../azure_rm_privateendpoint/tasks/main.yml | 157 + .../azure_rm_privateendpointdnszonegroup/aliases | 3 + .../meta/main.yml | 2 + .../tasks/main.yml | 255 + .../targets/azure_rm_privatelinkservice/aliases | 3 + .../azure_rm_privatelinkservice/meta/main.yml | 2 + .../azure_rm_privatelinkservice/tasks/main.yml | 284 + .../azure_rm_proximityplacementgroup/aliases | 3 + .../azure_rm_proximityplacementgroup/meta/main.yml | 2 + .../tasks/main.yml | 76 + .../targets/azure_rm_publicipaddress/aliases | 4 + .../targets/azure_rm_publicipaddress/meta/main.yml | 2 + .../azure_rm_publicipaddress/tasks/main.yml | 139 + .../targets/azure_rm_recoveryservicesvault/aliases | 3 + .../azure_rm_recoveryservicesvault/meta/main.yml | 2 + .../azure_rm_recoveryservicesvault/tasks/main.yml | 55 + .../targets/azure_rm_rediscache/aliases | 6 + .../targets/azure_rm_rediscache/meta/main.yml | 2 + .../targets/azure_rm_rediscache/tasks/main.yml | 492 ++ .../azure_rm_registrationassignment/aliases | 3 + .../azure_rm_registrationassignment/meta/main.yml | 2 + .../azure_rm_registrationassignment/tasks/main.yml | 78 + .../azure_rm_registrationdefinition/aliases | 3 + .../azure_rm_registrationdefinition/meta/main.yml | 2 + .../azure_rm_registrationdefinition/tasks/main.yml | 134 + .../integration/targets/azure_rm_resource/aliases | 3 + .../targets/azure_rm_resource/meta/main.yml | 2 + .../targets/azure_rm_resource/tasks/main.yml | 158 + .../targets/azure_rm_resourcegroup/aliases | 4 + .../targets/azure_rm_resourcegroup/meta/main.yml | 2 + 
.../targets/azure_rm_resourcegroup/tasks/main.yml | 50 + .../targets/azure_rm_roleassignment/aliases | 3 + .../targets/azure_rm_roleassignment/meta/main.yml | 2 + .../targets/azure_rm_roleassignment/tasks/main.yml | 220 + .../targets/azure_rm_roledefinition/aliases | 3 + .../targets/azure_rm_roledefinition/meta/main.yml | 2 + .../targets/azure_rm_roledefinition/tasks/main.yml | 211 + .../targets/azure_rm_routetable/aliases | 3 + .../targets/azure_rm_routetable/meta/main.yml | 2 + .../targets/azure_rm_routetable/tasks/main.yml | 195 + .../targets/azure_rm_securitygroup/aliases | 4 + .../targets/azure_rm_securitygroup/meta/main.yml | 2 + .../targets/azure_rm_securitygroup/tasks/main.yml | 377 + .../targets/azure_rm_servicebus/aliases | 3 + .../targets/azure_rm_servicebus/meta/main.yml | 2 + .../targets/azure_rm_servicebus/tasks/main.yml | 181 + .../targets/azure_rm_sqlmanagedinstance/aliases | 3 + .../azure_rm_sqlmanagedinstance/meta/main.yml | 2 + .../azure_rm_sqlmanagedinstance/tasks/main.yml | 182 + .../integration/targets/azure_rm_sqlserver/aliases | 8 + .../targets/azure_rm_sqlserver/meta/main.yml | 2 + .../targets/azure_rm_sqlserver/tasks/main.yml | 840 +++ .../targets/azure_rm_storageaccount/aliases | 3 + .../targets/azure_rm_storageaccount/meta/main.yml | 2 + .../targets/azure_rm_storageaccount/tasks/main.yml | 592 ++ .../targets/azure_rm_storageblob/aliases | 3 + .../targets/azure_rm_storageblob/files/Ratings.png | Bin 0 -> 35164 bytes .../targets/azure_rm_storageblob/meta/main.yml | 2 + .../targets/azure_rm_storageblob/tasks/main.yml | 110 + .../targets/azure_rm_storageshare/aliases | 3 + .../targets/azure_rm_storageshare/meta/main.yml | 2 + .../targets/azure_rm_storageshare/tasks/main.yml | 126 + .../integration/targets/azure_rm_subnet/aliases | 3 + .../targets/azure_rm_subnet/meta/main.yml | 2 + .../targets/azure_rm_subnet/tasks/main.yml | 320 + .../targets/azure_rm_subscription/aliases | 3 + .../targets/azure_rm_subscription/meta/main.yml | 2 + 
.../targets/azure_rm_subscription/tasks/main.yml | 24 + .../targets/azure_rm_trafficmanagerprofile/aliases | 3 + .../azure_rm_trafficmanagerprofile/meta/main.yml | 2 + .../azure_rm_trafficmanagerprofile/tasks/main.yml | 289 + .../targets/azure_rm_virtualhub/aliases | 3 + .../targets/azure_rm_virtualhub/meta/main.yml | 2 + .../targets/azure_rm_virtualhub/tasks/main.yml | 94 + .../targets/azure_rm_virtualhubconnection/aliases | 3 + .../azure_rm_virtualhubconnection/meta/main.yml | 2 + .../azure_rm_virtualhubconnection/tasks/main.yml | 163 + .../targets/azure_rm_virtualmachine/aliases | 4 + .../targets/azure_rm_virtualmachine/inventory.yml | 74 + .../targets/azure_rm_virtualmachine/main.yml | 7 + .../targets/azure_rm_virtualmachine/runme.sh | 5 + .../tasks/azure_test_deallocate.yml | 103 + .../tasks/azure_test_dual_nic.yml | 146 + .../tasks/azure_test_ephemeral_os.yml | 130 + .../tasks/azure_test_image_latest.yml | 69 + .../tasks/azure_test_image_specific.yml | 61 + .../tasks/azure_test_invalid.yml | 35 + .../tasks/azure_test_minimal.yml | 133 + .../tasks/azure_test_minimal_manageddisk.yml | 89 + .../tasks/azure_test_no_nsg.yml | 83 + .../tasks/azure_test_no_public_ip.yml | 44 + .../tasks/azure_test_public_ip.yml | 331 + .../tasks/azure_test_spot.yml | 83 + .../azure_rm_virtualmachine/tasks/setup.yml | 26 + .../azure_rm_virtualmachineextension/aliases | 5 + .../files/test-protected-settings.json | 13 + .../files/test-public-settings.json | 750 ++ .../azure_rm_virtualmachineextension/meta/main.yml | 2 + .../tasks/main.yml | 270 + .../azure_rm_virtualmachineimage_info/aliases | 4 + .../meta/main.yml | 2 + .../tasks/main.yml | 55 + .../azure_rm_virtualmachinescaleset/aliases | 7 + .../azure_rm_virtualmachinescaleset/meta/main.yml | 2 + .../azure_rm_virtualmachinescaleset/tasks/main.yml | 897 +++ .../azure_rm_virtualmachinesize_info/aliases | 3 + .../azure_rm_virtualmachinesize_info/meta/main.yml | 2 + .../tasks/main.yml | 20 + .../targets/azure_rm_virtualnetwork/aliases 
| 3 + .../targets/azure_rm_virtualnetwork/meta/main.yml | 2 + .../targets/azure_rm_virtualnetwork/tasks/main.yml | 187 + .../targets/azure_rm_virtualnetworkgateway/aliases | 3 + .../azure_rm_virtualnetworkgateway/meta/main.yml | 2 + .../azure_rm_virtualnetworkgateway/tasks/main.yml | 218 + .../targets/azure_rm_virtualnetworkpeering/aliases | 4 + .../azure_rm_virtualnetworkpeering/meta/main.yml | 2 + .../azure_rm_virtualnetworkpeering/tasks/main.yml | 126 + .../targets/azure_rm_virtualwan/aliases | 3 + .../targets/azure_rm_virtualwan/meta/main.yml | 2 + .../targets/azure_rm_virtualwan/tasks/main.yml | 61 + .../targets/azure_rm_vmbackuppolicy/aliases | 3 + .../targets/azure_rm_vmbackuppolicy/meta/main.yml | 2 + .../targets/azure_rm_vmbackuppolicy/tasks/main.yml | 78 + .../integration/targets/azure_rm_vpnsite/aliases | 3 + .../targets/azure_rm_vpnsite/meta/main.yml | 2 + .../targets/azure_rm_vpnsite/tasks/main.yml | 102 + .../integration/targets/azure_rm_webapp/aliases | 5 + .../targets/azure_rm_webapp/meta/main.yml | 2 + .../targets/azure_rm_webapp/tasks/main.yml | 537 ++ .../azure_rm_webappaccessrestriction/aliases | 3 + .../azure_rm_webappaccessrestriction/meta/main.yml | 2 + .../tasks/main.yml | 268 + .../targets/azure_rm_webappvnetconnection/aliases | 3 + .../azure_rm_webappvnetconnection/meta/main.yml | 2 + .../azure_rm_webappvnetconnection/tasks/main.yml | 129 + .../integration/targets/azure_rm_workspace/aliases | 5 + .../targets/azure_rm_workspace/meta/main.yml | 2 + .../targets/azure_rm_workspace/tasks/main.yml | 128 + .../integration/targets/inventory_azure/aliases | 2 + .../playbooks/create_inventory_config.yml | 11 + .../playbooks/empty_inventory_config.yml | 9 + .../targets/inventory_azure/playbooks/setup.yml | 48 + .../targets/inventory_azure/playbooks/teardown.yml | 40 + .../inventory_azure/playbooks/test_inventory.yml | 16 + .../targets/inventory_azure/playbooks/vars.yml | 14 + .../integration/targets/inventory_azure/runme.sh | 24 + 
.../targets/inventory_azure/templates/basic.yml | 3 + .../targets/inventory_azure/templates/basic2.yml | 3 + .../targets/inventory_azure/test.azure_rm.yml | 0 .../tests/integration/targets/setup_azure/aliases | 0 .../azcollection/tests/sanity/ignore-2.10.txt | 270 + .../azcollection/tests/sanity/ignore-2.11.txt | 270 + .../azcollection/tests/sanity/ignore-2.12.txt | 270 + .../azcollection/tests/sanity/ignore-2.13.txt | 269 + .../azcollection/tests/sanity/ignore-2.14.txt | 266 + .../azure/azcollection/tests/sanity/ignore-2.9.txt | 165 + .../azure/azcollection/tests/utils/ado/ado.sh | 115 + .../azcollection/tests/utils/shippable/azure.sh | 34 + .../tests/utils/shippable/check_matrix.py | 114 + .../azcollection/tests/utils/shippable/cloud.sh | 34 + .../azcollection/tests/utils/shippable/sanity.sh | 7 + .../tests/utils/shippable/shippable.sh | 124 + .../azcollection/tests/utils/shippable/timing.py | 16 + .../azcollection/tests/utils/shippable/timing.sh | 5 + 684 files changed, 153458 insertions(+) create mode 100644 ansible_collections/azure/azcollection/.gitignore create mode 100644 ansible_collections/azure/azcollection/CHANGELOG.md create mode 100644 ansible_collections/azure/azcollection/CONTRIBUTING.md create mode 100644 ansible_collections/azure/azcollection/CredScanSuppressions.json create mode 100644 ansible_collections/azure/azcollection/FILES.json create mode 100644 ansible_collections/azure/azcollection/LICENSE create mode 100644 ansible_collections/azure/azcollection/MANIFEST.json create mode 100644 ansible_collections/azure/azcollection/README.md create mode 100644 ansible_collections/azure/azcollection/ansible.cfg create mode 100644 ansible_collections/azure/azcollection/azure-pipelines.yml create mode 100644 ansible_collections/azure/azcollection/meta/execution-environment.yml create mode 100644 ansible_collections/azure/azcollection/meta/runtime.yml create mode 100644 ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py create mode 
100644 ansible_collections/azure/azcollection/plugins/doc_fragments/azure_rm.py create mode 100644 ansible_collections/azure/azcollection/plugins/doc_fragments/azure_tags.py create mode 100644 ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py create mode 100644 ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py create mode 100644 ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py create mode 100644 ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_ext.py create mode 100644 ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_rest.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_account_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpoolversion_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksupgrade_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksversion_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagement.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagement_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagementservice.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagementservice_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_appserviceplan.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_appserviceplan_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationaccount.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationaccount_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationrunbook.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationrunbook_info.py create mode 
100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_availabilityset.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_availabilityset_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_azurefirewall.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_azurefirewall_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_batchaccount.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrytag.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrytag_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrywebhook.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrywebhook_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cosmosdbaccount.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_cosmosdbaccount_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_ddosprotectionplan.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_ddosprotectionplan_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabarmtemplate_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifact_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabcustomimage.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabcustomimage_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabenvironment.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabenvironment_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabpolicy.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabpolicy_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualmachine.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualmachine_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualnetwork.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualnetwork_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_diskencryptionset.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_diskencryptionset_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnsrecordset.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnsrecordset_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_eventhub.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_eventhub_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_image.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_image_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevicemodule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothubconsumergroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultkey.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultkey_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_lock.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_lock_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_loganalyticsworkspace.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_loganalyticsworkspace_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_managementgroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_managementgroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbconfiguration.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbconfiguration_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitorlogprofile.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_multiplemanageddisks.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlconfiguration.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlconfiguration_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqldatabase.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqldatabase_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_natgateway.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_natgateway_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_openshiftmanagedcluster.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_openshiftmanagedcluster_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlconfiguration.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlconfiguration_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszone.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszone_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszonelink.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszonelink_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpoint.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpoint_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointdnszonegroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointdnszonegroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatelinkservice.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatelinkservice_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscachefirewallrule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_route.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_route_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebusqueue.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebussaspolicy.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopic.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopicsubscription.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_snapshot.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmanagedinstance.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmanagedinstance_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageblob.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_subscription_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanager.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineimage_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetextension.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetextension_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinesize_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetwork.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetwork_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgateway.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkpeering.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkpeering_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmssnetworkinterface_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite_info.py create mode 100644 
ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsitelink_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappslot.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection.py create mode 100644 ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection_info.py create mode 100644 ansible_collections/azure/azcollection/pr-pipelines.yml create mode 100644 ansible_collections/azure/azcollection/release-pipelines.yml create mode 100644 ansible_collections/azure/azcollection/requirements-azure.txt create mode 100644 ansible_collections/azure/azcollection/sanity-requirements-azure.txt create mode 100644 ansible_collections/azure/azcollection/shippable.yml create mode 100644 ansible_collections/azure/azcollection/tests/config.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/tasks/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert1.txt create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert2.txt create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert3b64.txt create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/tasks/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/meta/main.yml create 
mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/aliases create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/lookup_plugins/azure_service_principal_attribute.py create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/aliases create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/lookup_plugins/azure_service_principal_attribute.py create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/lookup_plugins/azure_service_principal_attribute.py create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/defaults/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_async.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_shared.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/templates/disk_config.j2 create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/tasks/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/aliases create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/aliases 
create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/files/Ratings.png create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/aliases create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/runme.sh create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_ephemeral_os.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_latest.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_specific.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal_manageddisk.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_nsg.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_spot.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/setup.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-protected-settings.json create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-public-settings.json create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/meta/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/tasks/main.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/aliases create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/create_inventory_config.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/empty_inventory_config.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml create mode 100644 
ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic2.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/test.azure_rm.yml create mode 100644 ansible_collections/azure/azcollection/tests/integration/targets/setup_azure/aliases create mode 100644 ansible_collections/azure/azcollection/tests/sanity/ignore-2.10.txt create mode 100644 ansible_collections/azure/azcollection/tests/sanity/ignore-2.11.txt create mode 100644 ansible_collections/azure/azcollection/tests/sanity/ignore-2.12.txt create mode 100644 ansible_collections/azure/azcollection/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/azure/azcollection/tests/sanity/ignore-2.14.txt create mode 100644 ansible_collections/azure/azcollection/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/azure/azcollection/tests/utils/ado/ado.sh create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/azure.sh create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/check_matrix.py create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/cloud.sh create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/sanity.sh create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/shippable.sh create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/timing.py create mode 100644 ansible_collections/azure/azcollection/tests/utils/shippable/timing.sh (limited to 'ansible_collections/azure/azcollection') diff --git a/ansible_collections/azure/azcollection/.gitignore b/ansible_collections/azure/azcollection/.gitignore new file mode 100644 index 
000000000..a5e318764 --- /dev/null +++ b/ansible_collections/azure/azcollection/.gitignore @@ -0,0 +1,9 @@ +*.pyc +tests/output/ +tests/integration/cloud-config-azure.ini +azure-azcollection-*.tar.gz +venv* +.venv* +.vscode +ansible_collections/ +.idea/ diff --git a/ansible_collections/azure/azcollection/CHANGELOG.md b/ansible_collections/azure/azcollection/CHANGELOG.md new file mode 100644 index 000000000..8de6021a3 --- /dev/null +++ b/ansible_collections/azure/azcollection/CHANGELOG.md @@ -0,0 +1,729 @@ +# Change Log + +## v1.16.0 (2023-5-31) + +### NEW MODULES + - azure_rm_vmssnetworkinterface_info: Add VMSS networkinterface to get VMSS network interface info ([#1125](https://github.com/ansible-collections/azure/pull/1125)) + +### FEATURE ENHANCEMENT + - azure_rm_managementgroup: Upgrade azure-mgmt-managements to 1.0.0 ([#1117](https://github.com/ansible-collections/azure/pull/1117)) + - azure_rm_managementgroup_info: Upgrade azure-mgmt-managements to 1.0.0 ([#1117](https://github.com/ansible-collections/azure/pull/1117)) + - azure_rm_servicebus: Support tags to azure_rm_servicebus.py ([#1114](https://github.com/ansible-collections/azure/pull/1114)) + - azure_rm_servicebusqueue: Add `max_message_size_in_kb` to azure_rm_servicebusqueue ([#1092](https://github.com/ansible-collections/azure/pull/1092)) + - azure_rm_servicebustopic: Add `max_message_size_in_kb` to azure_rm_servicebusqueue ([#1092](https://github.com/ansible-collections/azure/pull/1092)) + - plugins/doc_fragments/azure_rm: Update the description of `include_vm_resource_groups` ([#1077](https://github.com/ansible-collections/azure/pull/1077)) + - azure_rm_galleryimageversion: Fix append tags for azure_rm_galleryimageversion ([#1100](https://github.com/ansible-collections/azure/pull/1100)) + - azure_rm_lock: Add support for `notes` ([#1097](https://github.com/ansible-collections/azure/pull/1097)) + - azure_rm_devtestlab: Upgrade azure-mgmt-devtestlabs to 9.0.0 
([#958](https://github.com/ansible-collections/azure/pull/958)) + - azure_rm_virtualmachine: + - Upgrade azure-mgmt-marketplaceordering to 1.1.0 ([#940](https://github.com/ansible-collections/azure/pull/940)) + - Add support for new `managed_disk_type` type `UltraSSD_LRS` ([#1136](https://github.com/ansible-collections/azure/pull/1136)) + - azure_rm_virtualmachinescaleset: + - Upgrade azure-mgmt-marketplaceordering to 1.1.0 ([#940](https://github.com/ansible-collections/azure/pull/940)) + - Add support for new `managed_disk_type` type `UltraSSD_LRS` ([#1136](https://github.com/ansible-collections/azure/pull/1136)) + - azure_rm_virtualnetworkpeering_info: Add support for `peering_sync_level` ([#1085](https://github.com/ansible-collections/azure/pull/1085)) + - azure_rm_containerinstance: Add support for `subnet_ids` ([#1090](https://github.com/ansible-collections/azure/pull/1090)) + - azure_rm_containerinstance_info: Add support for `subnet_ids` ([#1090](https://github.com/ansible-collections/azure/pull/1090)) + - azure_rm_storageaccount: Add support for failover ([#1141](https://github.com/ansible-collections/azure/pull/1141)) + + +### BUG FIXING + - azure_rm_loganalyticsworkspace: Fix test cases ([#1129](https://github.com/ansible-collections/azure/pull/1129)) + - azure_rm_virtualmachine_info: Ensure `display_status` is initialised before it is used ([#1123](https://github.com/ansible-collections/azure/pull/1123)) + - azure_rm_webapp: + - Add support for creating with `python` ([#1128](https://github.com/ansible-collections/azure/pull/1128)) + - Fix azure_rm_webapp fails when state is `absent` ([#1079](https://github.com/ansible-collections/azure/pull/1079)) + - azure_rm_virtualmachine: Add option to choose whether or not to create a network security group ([#1056](https://github.com/ansible-collections/azure/pull/1056)) + - azure_rm_networkinterace: Fix idempotent failure ([#1037](https://github.com/ansible-collections/azure/pull/1037)) + - 
azure_rm_virtualnetwork: Update documentation of `azure_rm_virtualnetwork` to reflect that the `dns_servers` limit on length is no longer 2 ([#1082](https://github.com/ansible-collections/azure/pull/1082)) + - azure_rm_rediscache: Remove references to Redis 4 and support upgrading to Redis 6 ([#1132](https://github.com/ansible-collections/azure/pull/1132)) + - azure_rm_virtualnetwork_info: Update documentation in azure_rm_virtualnetwork_info and include a small change to match other patterns for getting network info. ([#1087](https://github.com/ansible-collections/azure/pull/1087)) + - azure_rm_snapshot: Add supprot for `incremental` ([#1135](https://github.com/ansible-collections/azure/pull/1135)) + - azure_rm_appgateway: Fix `version_added` in module document ([#1139](https://github.com/ansible-collections/azure/pull/1139)) + - azure_rm_*: Documentation fixes ([#1151](https://github.com/ansible-collections/azure/pull/1151)) + - azure_rm_devtestlab/aliases: Disable `azure_rm_devtestlab` test ([#1144](https://github.com/ansible-collections/azure/pull/1144)) + - inventory/azure_rm: **inventory** - Ignore response status code other than 200 ([#1166](https://github.com/ansible-collections/azure/pull/1166)) + - azure_rm_keyvaultkey: Use creds in module args when auth_source is auto ([#1010](https://github.com/ansible-collections/azure/pull/1010)) + - azure_rm_keyvaultkey_info: Use creds in module args when `auth_source` is `auto` ([#1010](https://github.com/ansible-collections/azure/pull/1010)) + - azure_rm_keyvaultsecret: Use creds in module args when `auth_source` is `auto` ([#1010](https://github.com/ansible-collections/azure/pull/1010)) + - azure_rm_keyvaultsecret_info: Use creds in module args when `auth_source` is `auto` ([#1010](https://github.com/ansible-collections/azure/pull/1010)) + - azure_rm_routetable: Fix route table updates delete all existing routes in the route table ([#1146](https://github.com/ansible-collections/azure/pull/1146)) + - 
azure_rm_cdnendpoint: Fix failed to clear CND endpoint ([#1154](https://github.com/ansible-collections/azure/pull/1154)) + - azure_rm_resource_info: Add support for `method` ([#1158](https://github.com/ansible-collections/azure/pull/1158)) + - azure_keyvault_secret: Add support for azure cli credential ([#1161](https://github.com/ansible-collections/azure/pull/1161)) + - requirements-azure.txt:Update dependency to resolve upstream issue ([#1169](https://github.com/ansible-collections/azure/pull/1169)) + +## v1.15.0 (2023-03-15) + +### NEW MODULES + - azure_rm_multiplemanageddisks: New module to create/update/delete/attach multiple disks ([#936](https://github.com/ansible-collections/azure/pull/936)) + - azure_rm_sqlelasticpool: Add azure_rm_sqlelasticpool.py to create Elastic Pool ([#1027](https://github.com/ansible-collections/azure/pull/1027)) + - azure_rm_sqlelasticpool_info: Add azure_rm_sqlelasticpool_info.py to get Elastic Pool info ([#1027](https://github.com/ansible-collections/azure/pull/1027)) + - azure_rm_sqlmanagedinstance: Add azure_rm_sqlmanagedinstance module ([#1039](https://github.com/ansible-collections/azure/pull/1039)) + - azure_rm_sqlmanagedinstance_info: Add azure_rm_sqlmanagedinstance module ([#1039](https://github.com/ansible-collections/azure/pull/1039)) + +### FEATURE ENHANCEMENT + - requirements-azure.txt: Upgrade azure-mgmt-apimanagement to 3.0.0 ([#943](https://github.com/ansible-collections/azure/pull/943)) + - azure_rm_openshiftmanagedcluster: Add new choices to vm_size in azure_rm_openshiftmanagedcluster.py ([#979](https://github.com/ansible-collections/azure/pull/979)) + - azure_rm_appgateway: Add new parameters to azure_rm_appgateway ([#990](https://github.com/ansible-collections/azure/pull/990)) + - azure_rm.py: Add compose support in inventory/azure_rm.py ([#1065](https://github.com/ansible-collections/azure/pull/1065)) + - azure_rm_backupazurevm: add option for recovery point expiry time 
([#1057](https://github.com/ansible-collections/azure/pull/1057)) + - runtime.yml: Keep action_groups and modules list consistent #([1042](https://github.com/ansible-collections/azure/pull/1042)) + - azure_rm_virtualnetworkpeering: Add synchronizing of VNet peering when sync level is LocalNotInSync ([#1025](https://github.com/ansible-collections/azure/pull/1025)) + - azure_rm_deployment: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_deployment_info: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_lock: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_subscription: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_subscription_info: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_resourcegroup: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_resourcegroup_info: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_virtualmachine: Upgrade azure-mgmt-resource to 21.1.0 ([#960](https://github.com/ansible-collections/azure/pull/960)) + - azure_rm_storageblob: Make batch_upload honour `force` attribute in azure_rm_storageblob ([#1018](https://github.com/ansible-collections/azure/pull/1018)) + - azure_rm_virtualnetwork: Add `flow_timeout_in_minutes` to azure_rm_virtualnetwork ([#1036](https://github.com/ansible-collections/azure/pull/1036)) + - azure_rm_virtualnetwork_info: Add `flow_timeout_in_minutes` to azure_rm_virtualnetwork ([#1036](https://github.com/ansible-collections/azure/pull/1036)) + - requirements-azure.txt: Bump cryptography from 38.0.1 to 38.0.3 
([#1035](https://github.com/ansible-collections/azure/pull/1035)) + - azure_rm_galleryimageversion_info: Read paginated response for gallery image versions ([#1073](https://github.com/ansible-collections/azure/pull/1073)) + - azure_rm_virtualmachine: Add `security_profile` options to azure_rm_virtualmachine ([#1033](https://github.com/ansible-collections/azure/pull/1033)) + - azure_rm_virtualmachine_info: Add `security_profile` options to azure_rm_virtualmachine ([#1033](https://github.com/ansible-collections/azure/pull/1033)) + +### BUG FIXING + - azure_rm_deployment: Fix Ansible azure_rm_deployment module returns error but deployment in Azure was successful ([#986](https://github.com/ansible-collections/azure/pull/986)) + - azure_rm.py: support for environment variable ANSIBLE_AZURE_VM_RESOURCE_GROUPS ([#975](https://github.com/ansible-collections/azure/pull/975)) + - azure_rm_common.py: Ensure trailing slash on base_url ([#984](https://github.com/ansible-collections/azure/pull/984)) + - azure_rm_virtualmachine: Correct spelling errors in documents ([#1012](https://github.com/ansible-collections/azure/pull/1012)) + - azure_rm_storageblob: Format the md5 value returned by azure_rm_storageblob.py ([#1038](https://github.com/ansible-collections/azure/pull/1038)) + - aure_rm_loadbalancer: The zone default value is None if not configured ([#1060](https://github.com/ansible-collections/azure/pull/1060)) + - README.md: Correct spelling errors in documents ([#1059](https://github.com/ansible-collections/azure/pull/1059)) + - azure_rm_securitygroup: Fixed idempotent error due to protocol ([#1064](https://github.com/ansible-collections/azure/pull/1064)) + - azure_rm_roleassignment: Correct document case's config ([#1053](https://github.com/ansible-collections/azure/pull/1053)) + - azure_rm_privatednsrecordset: Change the defined long type to int ([#1058](https://github.com/ansible-collections/azure/pull/1058)) + - azure_rm_keyvault: Add the required restriction to the 
parameter ([#1054](https://github.com/ansible-collections/azure/pull/1054)) + - azure_rm_dnsrecordset: Change the defined long type to int ([#1052](https://github.com/ansible-collections/azure/pull/1052)) + - azure_rm_common.py: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_backuppolicy: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_manageddisk: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_multiplemanageddisks: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_sqlmanagedinstance: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_servicebussaspolicy: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_virtualmachine: Add Ansible 2.14 and python 3.11 to CI ([#1074](https://github.com/ansible-collections/azure/pull/1074)) + - azure_rm_securitygroup: azure_rm_securitygroup is changed without actual changes when only capitalization differs ([#1096](https://github.com/ansible-collections/azure/pull/1096)) + +## v1.14.0 (2022-10-31) + +### NEW MODULES + - azure_rm_firewallpolicy: Add new module `azure_rm_firewallpolicy` ([#705](https://github.com/ansible-collections/azure/pull/705)) + - azure_rm_privatelinkservice: Add new module `azure_rm_privatelinkservice` ([#858](https://github.com/ansible-collections/azure/pull/858)) + - azure_rm_privatelinkservice_info: Add new module `azure_rm_privatelinkservice_info` ([#858](https://github.com/ansible-collections/azure/pull/858)) + - azure_rm_privateendpointconnection: Add new module `azure_rm_privateendpointconnection` ([#858](https://github.com/ansible-collections/azure/pull/858)) + - 
azure_rm_privateendpointconnection_info: Add new module `azure_rm_privateendpointconnection_info` ([#858](https://github.com/ansible-collections/azure/pull/858)) + - azure_rm_natgateway: Add new module `azure_rm_natgateway` ([#860](https://github.com/ansible-collections/azure/pull/860)) + - azure_rm_natgateway_info: Add new module `azure_rm_natgateway_info` ([#860](https://github.com/ansible-collections/azure/pull/860)) + - azure_rm_bastionhost: Add new module `azure_rm_bastionhost` ([#873](https://github.com/ansible-collections/azure/pull/873)) + - azure_rm_bastionhost_info: Add new module `azure_rm_bastionhost_info` ([#873](https://github.com/ansible-collections/azure/pull/873)) + - azure_rm_account_info: Add new module `azure_rm_account_info` to get facts for current logged-in user ([#922](https://github.com/ansible-collections/azure/pull/922)) + - azure_rm_virtualhubconnection: Add new module `azure_rm_virtualhubconnection` ([#939](https://github.com/ansible-collections/azure/pull/939)) + - azure_rm_virtualhubconnection_info: Add new module `azure_rm_virtualhubconnection_info` ([#939](https://github.com/ansible-collections/azure/pull/939)) + - azure_rm_aksagentpool: Add new module `azure_rm_aksagentpool` ([#974](https://github.com/ansible-collections/azure/pull/974)) + - azure_rm_aksagentpool_info: Add new module `azure_rm_aksagentpool_info` ([#974](https://github.com/ansible-collections/azure/pull/974)) + - azure_rm_aksagentpoolversion_info: Add new module `azure_rm_aksagentpoolversion_info` ([#974](https://github.com/ansible-collections/azure/pull/974)) + +### FEATURE ENHANCEMENT + - azure_rm_container*: Azure container registry tags ([#830](https://github.com/ansible-collections/azure/pull/830)) + - azure_rm_loadbalancer: Support for `disable_outbound_snat` configuration ([#744](https://github.com/ansible-collections/azure/pull/744)) + - azure_rm_manageddisk: + - Support for create `StandardSSD_ZRS` and `Premium_ZRS` managed disks 
([#855](https://github.com/ansible-collections/azure/pull/855)) + - Support for mount disk to multiple VMs ([#867](https://github.com/ansible-collections/azure/pull/867)) + - azure_rm_manageddisk_info: Support to mount disk to multiple VMs ([#867](https://github.com/ansible-collections/azure/pull/867)) + - azure_rm_virtualmachine: + - Support for create `StandardSSD_ZRS` and `Premium_ZRS` managed disks ([#855](https://github.com/ansible-collections/azure/pull/855)) + - Support for configure `enableAutomaticUpdates` ([#933](https://github.com/ansible-collections/azure/pull/933)) + - azure_rm_storageaccount: + - Support for configure `static_website` ([#878](https://github.com/ansible-collections/azure/pull/878)) + - Support for `public_network_access` ([#875](https://github.com/ansible-collections/azure/pull/875)) + - Support for create Azure Data Lake Storage Gen2 storage account ([#998](https://github.com/ansible-collections/azure/pull/998)) + - Support for encrypt storage account ([#937](https://github.com/ansible-collections/azure/pull/937)) + - azure_rm_storageaccount_info: + - Support for `public_network_access` ([#875](https://github.com/ansible-collections/azure/pull/875)) + - Support for Azure Data Lake Storage Gen2 ([#998](https://github.com/ansible-collections/azure/pull/998)) + - Support for encrypt storage account ([#937](https://github.com/ansible-collections/azure/pull/937)) + - azure_keyvault_secret: Add environment variables to keyvault lookup plugin ([#978](https://github.com/ansible-collections/azure/pull/978)) + - README.md: Added prompt to install virtual environment ([#910](https://github.com/ansible-collections/azure/pull/910)) + - azure_rm_keyvaultkey: Adding support for `key_type`, `key_attributes`, `key_size`, `curve` ([#930](https://github.com/ansible-collections/azure/pull/930)) + - azure_rm_virtualmachinescaleset: Add new parameter `os_disk_size_gb`, allowing set os disk size 
([#961](https://github.com/ansible-collections/azure/pull/961)) + - azure_rm_privateendpoint_info: Add connection details ([#965](https://github.com/ansible-collections/azure/pull/965)) + - azure_rm_aks: Support for upgrade nodepool kubernetes version ([#966](https://github.com/ansible-collections/azure/pull/966)) + - azure_rm_virtualnetworkgateway: Support set Virtual Network Gateway Generation ([#921](https://github.com/ansible-collections/azure/pull/921)) + - azure_rm_storage*: Update Storage dependencies ([#833](https://github.com/ansible-collections/azure/pull/833)) + - azure_rm_appserviceplan*: Update azure.mgmt.web ([#849](https://github.com/ansible-collections/azure/pull/849)) + - azure_rm_functionapp*: Update dependencies ([#849](https://github.com/ansible-collections/azure/pull/849)) + - azure_rm_webapp*: Update dependencies ([#849](https://github.com/ansible-collections/azure/pull/849)) + - azure_rm_backup*: Upgrade azure-mgmt-recoveryservice relate dependence ([#895](https://github.com/ansible-collections/azure/pull/895)) + - azure_rm_dns*: Upgrade azure-mgmt-dns to `v8.0.0` ([#879](https://github.com/ansible-collections/azure/pull/879)) + - azure_rm_cognitivesearch: Upgrade azure-mgmt-search to `v8.0.0` ([#896](https://github.com/ansible-collections/azure/pull/896)) + - azure_rm_cognitivesearch_info: Upgrade azure-mgmt-search to `v8.0.0` ([#896](https://github.com/ansible-collections/azure/pull/896)) + - azure_rm_privatedns*: Upgrade azure-mgmt-privatedns to `v1.0.0` ([#880](https://github.com/ansible-collections/azure/pull/880)) + - azure_rm_aks*: Upgrade azure-mgmt-containerservice to `v20.0.0` ([#881](https://github.com/ansible-collections/azure/pull/881)) + - azure_rm_containerinstance*: Upgrade azure-mgmt-containerinstance to `v9.0.0` ([#882](https://github.com/ansible-collections/azure/pull/882)) + - azure_rm_mysql*: Upgrade azure-mgmt-rdbms to `v10.0.0` ([#884](https://github.com/ansible-collections/azure/pull/884)) + - azure_rm_mariadb*: 
Upgrade azure-mgmt-rdbms to `v10.0.0` ([#884](https://github.com/ansible-collections/azure/pull/884)) + - azure_rm_postgresql*: Upgrade azure-mgmt-rdbms to `v10.0.0` ([#884](https://github.com/ansible-collections/azure/pull/884)) + - azure_rm_trafficmanager*: Upgrade azure-mgmt-trafficmanager to `v1.0.0` ([#886](https://github.com/ansible-collections/azure/pull/886)) + - azure_rm_loganalyticsworkspace: Upgrade azure-mgmt-loganalytics to `v12.0.0` ([#888](https://github.com/ansible-collections/azure/pull/888)) + - azure_rm_loganalyticsworkspace_info: Upgrade azure-mgmt-loganalytics to `v12.0.0` ([#888](https://github.com/ansible-collections/azure/pull/888)) + - azure_rm_servicebus*: Upgrade azure-mgmt-servicebus to `v7.1.0` ([#889](https://github.com/ansible-collections/azure/pull/889)) + - azure_rm_iothub*: Upgrade azure-mgmt-iothub to `v2.2.0` ([#892](https://github.com/ansible-collections/azure/pull/892)) + - azure_rm_datalakestore: Upgrade azure-mgmt-datalake-store to `v1.0.0` ([#898](https://github.com/ansible-collections/azure/pull/898)) + - azure_rm_datalakestore_info: Upgrade azure-mgmt-datalake-store to `v1.0.0` ([#898](https://github.com/ansible-collections/azure/pull/898)) + - azure_rm_eventhub: Upgrade azure-mgmt-eventhubs to `v10.1.0` ([#900](https://github.com/ansible-collections/azure/pull/900)) + - azure_rm_eventhub_info: Upgrade azure-mgmt-eventhubs to `v10.1.0` ([#900](https://github.com/ansible-collections/azure/pull/900)) + - azure_rm_notificationhub: Upgrade azure-mgmt-notificationhubs to `v7.0.0` ([#899](https://github.com/ansible-collections/azure/pull/899)) + - azure_rm_notificationhub_info: Upgrade azure-mgmt-notificationhubs to `v7.0.0` ([#899](https://github.com/ansible-collections/azure/pull/899)) + - azure_rm_cdn*: Upgrade azure-mgmt-cdn to `v11.0.0` ([#945](https://github.com/ansible-collections/azure/pull/945)) + - azure_rm_registration*: Upgrade azure-mgmt-managedservices to `v6.0.0` 
([#948](https://github.com/ansible-collections/azure/pull/948)) + - azure_rm_hdinsightcluster: Upgrade hdinsight dependency to `v9.0.0` ([#951](https://github.com/ansible-collections/azure/pull/951)) + - azure_rm_hdinsightcluster_info: Upgrade hdinsight dependency to `v9.0.0` ([#951](https://github.com/ansible-collections/azure/pull/951)) + - azure_rm_role*: Upgrade azure-mgmt-authorization to `v2.0.0` ([#955](https://github.com/ansible-collections/azure/pull/955)) + - azure_rm_cosmosdbaccount: Upgrade azure-mgmt-cosmosdb to `v6.4.0` ([#952](https://github.com/ansible-collections/azure/pull/952)) + - azure_rm_cosmosdbaccount_info: Upgrade azure-mgmt-cosmosdb to `v6.4.0` ([#952](https://github.com/ansible-collections/azure/pull/952)) + - azure_rm_keyvault*: Upgrade azure-mgmt-keyvault to `v10.0.0` ([#959](https://github.com/ansible-collections/azure/pull/959)) + - requirements-azure.txt: Update azure-mgmt-core to `v1.3.0` ([#907](https://github.com/ansible-collections/azure/pull/907)) + +### BUG FIXING + - azure_rm_keyvault_info: Fix `azure_rm_keyvault_info` `Resource.properties` not found error ([#872](https://github.com/ansible-collections/azure/pull/872)) + - azure_rm_aks: + - Change `aad_profile.admin_group_object_ids` to take a list of string ([#865](https://github.com/ansible-collections/azure/pull/865)) + - Fix `authorized_ip_ranges` not set bug ([#912](https://github.com/ansible-collections/azure/pull/912)) + - azure_rm_manageddisk: + - Add missing parameters ([#925](https://github.com/ansible-collections/azure/pull/925)) + - If the disk exists, obtain parameters not configured ([#876](https://github.com/ansible-collections/azure/pull/876)) + - Add required option (`storage_account_id`) when importing a disk image ([#877](https://github.com/ansible-collections/azure/pull/877)) + - azure_rm_deployment_info: Fix API breaking change, replace `list` with `list_by_resource_group` ([#857](https://github.com/ansible-collections/azure/pull/857)) + - 
azure_rm_publicipaddress: Fix property get error.([#908](https://github.com/ansible-collections/azure/pull/908)) + - azure_rm_keyvault*: Fixes `credential_scopes` for track2 authentication when connecting to non-Azure Public cloud environments ([#854](https://github.com/ansible-collections/azure/pull/854)) + - azure_rm_keyvault: Expose `soft_delete_retention_in_days` ([#906](https://github.com/ansible-collections/azure/pull/906)) + - azure_rm_virtualmachine: Remove `started` default value ([#915](https://github.com/ansible-collections/azure/pull/915)) + - azure_rm_storageaccount: Add missing account type `Standard_GZRS` and `Standard_RAGZRS` ([#931](https://github.com/ansible-collections/azure/pull/931)) + - azure_rm_common: + - Replace `config` with `_config` in `azure_rm_common.py` to support the latest version of azure-mgmt-network ([#904](https://github.com/ansible-collections/azure/pull/904)) + - Fix azurerm MSI authentication with other Azure Cloud ([#894](https://github.com/ansible-collections/azure/pull/894)) + - Fix a sanity error ([#946](https://github.com/ansible-collections/azure/pull/946)) + - azure_rm_azurefirewall: Correct firewall action examples ([#962](https://github.com/ansible-collections/azure/pull/962)) + - azure_rm_webappaccessrestriction: Update test case ([#964](https://github.com/ansible-collections/azure/pull/964)) + +## v1.13.0 (2022-05-27) + +### NEW MODULES + - azure_rm_automationrunbook: Add new module azure_rm_automationrunbook ([#797](https://github.com/ansible-collections/azure/pull/797)) + - azure_rm_automationrunbook_info: Add new module azure_rm_automationrunbook ([#797](https://github.com/ansible-collections/azure/pull/797)) + - azure_rm_openshiftmanagedcluster_info: Add new module azure_rm_openshiftmanagedcluster_info ([#755](https://github.com/ansible-collections/azure/pull/755)) + - azure_keyvault_secret: KeyVault Lookup Plugin ([#109](https://github.com/ansible-collections/azure/pull/109)) + - azure_rm_datafactory: Add new 
module azure_rm_datafactory ([#840](https://github.com/ansible-collections/azure/pull/840)) + - azure_rm_datafactory_info: Add new module azure_rm_datafactory ([#840](https://github.com/ansible-collections/azure/pull/840)) + +### FEATURE ENHANCEMENT + - azure_rm_common.py: Upgrade azure-mgmt-automation to v1.0.0 ([#791](https://github.com/ansible-collections/azure/pull/791)) + - azure_rm_automationaccount: Upgrade azure-mgmt-automation to v1.0.0 ([#791](https://github.com/ansible-collections/azure/pull/791)) + - azure_rm_automationaccount_info: Upgrade azure-mgmt-automation to v1.0.0 ([#791](https://github.com/ansible-collections/azure/pull/791)) + - azure_rm_loadbalancer: Add support for `zones` ([#801](https://github.com/ansible-collections/azure/pull/801)) + - azure_rm_loadbalancer_info: Add support for `zones` ([#801](https://github.com/ansible-collections/azure/pull/801)) + - azure_rm.py: Update azure_rm examples ([#810](https://github.com/ansible-collections/azure/pull/810)) + - azure_rm_virtualmachinescaleset: Add support for `platform_fault_domain_count`, `orchestration_mode` ([#779](https://github.com/ansible-collections/azure/pull/779)) + - azure_rm_virtualmachinescaleset_info: Add support for `platform_fault_domain_count`, `orchestration_mode` ([#779](https://github.com/ansible-collections/azure/pull/779)) + - azure_rm_rediscache: Add support for `minimum_tls_version`, `public_network_access`, `redis_version` ([#680](https://github.com/ansible-collections/azure/pull/680)) + - azure_rm_rediscache_info: Add support for `minimum_tls_version`, `public_network_access`, `redis_version` ([#680](https://github.com/ansible-collections/azure/pull/680)) + - azure_rm_rediscachefirewallrule: Upgrade to track2 SDK ([#680](https://github.com/ansible-collections/azure/pull/680)) + - azure_rm_appgateway: Add rewrite rule capability to appgateway module ([#747](https://github.com/ansible-collections/azure/pull/747)) + - azure_rm_appgateway_info: Add rewrite rule 
capability to appgateway module ([#747](https://github.com/ansible-collections/azure/pull/747)) + - azure_rm_sqlserver: SQL Database enhancement ([#681](https://github.com/ansible-collections/azure/pull/681)) + - azure_rm_common.py: Bump SQL SDK to v3 ([#681](https://github.com/ansible-collections/azure/pull/681)) + - azure_rm_cosmosdbaccount: Add support for `enable_free_tier`, `mongo_version`, `public_network_access`, `ip_range_filter` parameter is being deprecated in favor of `ip_rules` ([#675](https://github.com/ansible-collections/azure/pull/675)) + - azure_rm_cosmosdbaccount_info: Add support for `enable_free_tier`, `mongo_version`, `public_network_access`, `ip_range_filter` parameter is being deprecated in favor of `ip_rules` ([#675](https://github.com/ansible-collections/azure/pull/675)) + - azure_rm_publicipaddress: Add support for `zones` ([#829](https://github.com/ansible-collections/azure/pull/829)) + - azure_rm_publicipaddress_info: Add support for `zones` ([#829](https://github.com/ansible-collections/azure/pull/829)) + - azure_rm_image: Add support `hyper_v_generation` ([#832](https://github.com/ansible-collections/azure/pull/832)) + - azure_rm_image_info: Add support `hyper_v_generation` ([#832](https://github.com/ansible-collections/azure/pull/832)) + +### BUG FIXING + - pr-pipelines.yml: Add python3.9 for CI ([#783](https://github.com/ansible-collections/azure/pull/783)) + - config.yml: Update test configure ([#790](https://github.com/ansible-collections/azure/pull/790)) + - azure_rm_manageddisk: Fixed the inconsistent return value of `attach_caching` caused by the azure-mgmt-compute upgrade ([#799](https://github.com/ansible-collections/azure/pull/799)) + - azure_rm_loadbalancer: Fix forced update bug caused by azure_rm_loadbalancer obtaining subnet predefined value ([#800](https://github.com/ansible-collections/azure/pull/800)) + - azure_rm_virtualmachine: Add license type for RHEL/SLES Azure Hybrid Benefit 
([#804](https://github.com/ansible-collections/azure/pull/804)) + - azure_rm_*: Update the document to meet the change requirements of Ansible 2.14 ([#814](https://github.com/ansible-collections/azure/pull/814)) + - azure_rm_appgateway_info: Update azure_rm_appgateway_info to use track2 dependencies ([#817](https://github.com/ansible-collections/azure/pull/817)) + - azure_rm_virtualmachine: Fix virtual machine top issue ([#767](https://github.com/ansible-collections/azure/pull/767)) + - azure_rm_subscription_info: Update azure_rm_subscription_info tags element type ([#819](https://github.com/ansible-collections/azure/pull/819)) + - azure_rm_manageddisk: Fix `os_type` comparison in azure_rm_manageddisk with existing disk ([#621](https://github.com/ansible-collections/azure/pull/621)) + - azure_rm_appgateway: Persist SSL configuration for appgateway ([#746](https://github.com/ansible-collections/azure/pull/746)) + - azure_rm_appgateway_info: Persist SSL configuration for appgateway ([#746](https://github.com/ansible-collections/azure/pull/746)) + - azure_rm_publicipaddress: Fix azure_rm_publicipaddress documentation page throws error ([#822]( https://github.com/ansible-collections/azure/pull/822)) + - azure_keyvault_secret: Fix Ansible dev version Sanity error in plugin file ([#825](https://github.com/ansible-collections/azure/pull/825)) + - azure_rm_rediscache: Fix Ansible dev version Sanity error in plugin file ([#825](https://github.com/ansible-collections/azure/pull/825)) + - azure_rm_keyvaultkey: Improved keyvault interaction auth_source=cli logic ([#823](https://github.com/ansible-collections/azure/pull/823)) + - azure_rm_keyvaultkey_info: Improved keyvault interaction auth_source=cli logic ([#823](https://github.com/ansible-collections/azure/pull/823)) + - azure_rm_keyvaultsecret: Improved keyvault interaction auth_source=cli logic ([#823](https://github.com/ansible-collections/azure/pull/823)) + - azure_rm_keyvaultsecret_info: Improved keyvault interaction 
auth_source=cli logic ([#823](https://github.com/ansible-collections/azure/pull/823)) + - azure_keyvault_secret: Add `hyper_v_generation` feature to azure_rm_image module ([#832](https://github.com/ansible-collections/azure/pull/832)) + - azure_rm_webapp: Correct documentation of return attribute for azure_rm_webapp ([#846](https://github.com/ansible-collections/azure/pull/846)) + - azure_rm_virtualmachine: When zones is null, there is no need to compare ([#853](https://github.com/ansible-collections/azure/pull/853)) + - All info modules: Change the tags type of the Info module to list and the element type to string ([#821](https://github.com/ansible-collections/azure/pull/821)) + - azcollection: Install collection to local directory during development ([#763](https://github.com/ansible-collections/azure/pull/763)) + +### BREAKING CHANGES: + - azure_rm_virtualmachinescaleset: Change default value of `single_placement_group` from `True` to `False` ([#851](https://github.com/ansible-collections/azure/pull/851)) + +## v1.12.0 (2022-03-14) + +### NEW MODULES + +### FEATURE ENHANCEMENT + - azure_rm_privateendpointdnszonegroup: Add `private_dns_zone_id` for `azure_rm_privateendpointdnszonegroup` ([#735](https://github.com/ansible-collections/azure/pull/735)) + - azure_rm_virtualmachineextension: Ignore comparing `protected_settings` ([#580](https://github.com/ansible-collections/azure/pull/580)) + - azure_rm_aks: Add new parameter to enable AAD profile ([#654](https://github.com/ansible-collections/azure/pull/654)) + - azure_rm_*: Upgrade azure-mgmt-network to 19.1.0 ([#729](https://github.com/ansible-collections/azure/pull/729)) + - azure_rm_sqldatabase: Parse datetime module arguments ([#623](https://github.com/ansible-collections/azure/pull/623)) + - azure_rm_sqldatabase_info: Parse datetime module arguments ([#623](https://github.com/ansible-collections/azure/pull/623)) + - azure_rm_virtualmachine: Add VM status detection mechanism 
([#772](https://github.com/ansible-collections/azure/pull/772)) + - azure_rm_*: Upgrade azure-mgmt-compute SDK to track2 SDK ([#672](https://github.com/ansible-collections/azure/pull/672)) + - azure_rm_*: Upgrade azure-mgmt-storage to 19.0.0 ([#777](https://github.com/ansible-collections/azure/pull/777)) + - requirements-azure.txt: Update azure-cli-core to 2.34.0 ([#775](https://github.com/ansible-collections/azure/pull/775)) + - azure_rm_virtualmachine: Update `azure_rm_galleryimage` to allow Hyper-V Generation ([#647](https://github.com/ansible-collections/azure/pull/647)) + +### BUG FIXING + - azure_rm_roleassignment: Fix mismatch assignment error ([#613](https://github.com/ansible-collections/azure/pull/613)) + - README.md: Delete unnecessary backtick in readme ([#736](https://github.com/ansible-collections/azure/pull/736)) + - azure_rm_availabilityset: Fix `check_mode` support ([#627](https://github.com/ansible-collections/azure/pull/627)) + - azure_rm_manageddisk: Fix `azure_rm_manageddisk` caching comparison ([#624](https://github.com/ansible-collections/azure/pull/624)) + - azure_rm_publicipaddress: Add mandatory field when updating IPAddress ([#752](https://github.com/ansible-collections/azure/pull/752)) + - azure_rm_common: Remove unused and deprecated `VERSION` import ([#751](https://github.com/ansible-collections/azure/pull/751)) + - azure_rm_keyvaultkey: Conditionally call non MSI authorization when interacting with keyvault ([#770](https://github.com/ansible-collections/azure/pull/770)) + - azure_rm_keyvaultkey_info: Conditionally call non MSI authorization when interacting with keyvault ([#770](https://github.com/ansible-collections/azure/pull/770)) + - azure_rm_keyvaultsecret: Conditionally call non MSI authorization when interacting with keyvault ([#770](https://github.com/ansible-collections/azure/pull/770)) + - azure_rm_keyvaultsecret_info: Conditionally call non MSI authorization when interacting with keyvault 
([#770](https://github.com/ansible-collections/azure/pull/770)) + - azure_rm_common: Fix typo error. ([#769](https://github.com/ansible-collections/azure/pull/769)) + - azure_rm_cosmosdbaccount: Update test case region ([#776](https://github.com/ansible-collections/azure/pull/776)) + - azure_rm_virtualmachine_info: Fix VM info module for failed VM provisions ([#745](https://github.com/ansible-collections/azure/pull/745)) + - azure_rm_loadbalancer_info: Fix documentation issue ([#719](https://github.com/ansible-collections/azure/pull/719)) + - azure_rm: Fix ansible 2.13 sanity fail ([#778](https://github.com/ansible-collections/azure/pull/778)) + - azure_rm: Append secondary network information to relevant `hostvars` ([#733](https://github.com/ansible-collections/azure/pull/733)) + + +## v1.11.0 (2022-01-18) + +### NEW MODULES + - azure_rm_virtualhub: New Module azure_rm_virtualhub ([#597](https://github.com/ansible-collections/azure/pull/597)) + - azure_rm_virtualhub_info: New Module azure_rm_virtualhub_info ([#597](https://github.com/ansible-collections/azure/pull/597)) + - azure_rm_hostgroup: New Module : azure_rm_hostgroup ([#704](https://github.com/ansible-collections/azure/pull/704)) + - azure_rm_hostgroup_info: New Module : azure_rm_hostgroup ([#704](https://github.com/ansible-collections/azure/pull/704)) + - azure_rm_privateendpointdnszonegroup: Add module for private endpoint DNS zone groups ([#689](https://github.com/ansible-collections/azure/pull/689)) + - azure_rm_privateendpointdnszonegroup_info: Add module for private endpoint DNS zone groups ([#689](https://github.com/ansible-collections/azure/pull/689)) + - azure_rm_monitordiagnosticsetting: Add new monitor diagnostic setting modules ([#701](https://github.com/ansible-collections/azure/pull/701)) + - azure_rm_monitordiagnosticsetting_info: Add new monitor diagnostic setting modules ([#701](https://github.com/ansible-collections/azure/pull/701)) + - azure_rm_storageshare: Azure storage file share 
module ([#603](https://github.com/ansible-collections/azure/pull/603)) + - azure_rm_storageshare_info: Azure storage file share module ([#603](https://github.com/ansible-collections/azure/pull/603)) + - azure_rm_appgateway_info: Application gateway start/stop ability and info module ([#673](https://github.com/ansible-collections/azure/pull/673)) + +### FEATURE ENHANCEMENT + - azure_rm_webapp: Add additional parameters for webapp site config ([#695](https://github.com/ansible-collections/azure/pull/695)) + - azure_rm_webapp_info: Add additional parameters for webapp site config ([#695](https://github.com/ansible-collections/azure/pull/695)) + - azure_rm: Add managed disks list to dynamic inventory hostvars ([#687](https://github.com/ansible-collections/azure/pull/687)) + - azure_rm_networkinterface: Add ability to connect network interface to application gateway backend pool ([#683](https://github.com/ansible-collections/azure/pull/683)) + - azure_rm_networkinterface_info: Add ability to connect network interface to application gateway backend pool ([#683](https://github.com/ansible-collections/azure/pull/683)) + - azure_rm_keyvaultsecret: feat: Add expiry information for keyvaultsecrets ([#660](https://github.com/ansible-collections/azure/pull/660)) + - azure_rm_virtualmachine_info: Verify the VM status after created ([#657](https://github.com/ansible-collections/azure/pull/657)) + - azure_rm_appgateway: Add advanced routing/redirect support for application gateway ([#685](https://github.com/ansible-collections/azure/pull/685)) + - azure_rm_virtualmachine: Add new parameter `proximity_placement_group` ([#611](https://github.com/ansible-collections/azure/pull/611)) + - azure_rm_virtualmachine_info: Add new parameter `proximity_placement_group` ([#611](https://github.com/ansible-collections/azure/pull/611)) + - azure_rm_dnsrecordset: Added Metadata support ([#589](https://github.com/ansible-collections/azure/pull/589)) + - azure_rm_dnsrecordset_info: Added Metadata 
support ([#589](https://github.com/ansible-collections/azure/pull/589)) + - azure_rm_virtualmachine_info: Add managed disk ID to returned facts for data disks ([#682](https://github.com/ansible-collections/azure/pull/682)) + - azure_rm_appgateway: Application gateway start/stop ability ([#673](https://github.com/ansible-collections/azure/pull/673)) + - azure_rm_aks: Add new feature - `outbound_type` ([#651](https://github.com/ansible-collections/azure/pull/651)) + - azure_rm_common: Support track2 SDK CLI authorization ([#676](https://github.com/ansible-collections/azure/pull/676)) + +### BUG FIXING + - azure_rm_common: Support track2 SDK ([#670](https://github.com/ansible-collections/azure/pull/670)) + - azure_rm_common: Allow module-level subscription id to be used for cross-subscription resource management ([#694](https://github.com/ansible-collections/azure/pull/694)) + - azure_rm_appserviceplan: Correct idempotency and premium SKU plans ([#693](https://github.com/ansible-collections/azure/pull/693)) + - ignore-2.13.txt: Update ignore file ([#696](https://github.com/ansible-collections/azure/pull/696)) + - ignore-2.12.txt: Update ignore file ([#696](https://github.com/ansible-collections/azure/pull/696)) + - ignore-2.11.txt: Update ignore file ([#696](https://github.com/ansible-collections/azure/pull/696)) + - ignore-2.10.txt: Update ignore file ([#696](https://github.com/ansible-collections/azure/pull/696)) + - azure_rm_virtualmachine: Misc typo fixes ([#698](https://github.com/ansible-collections/azure/pull/698)) + - azure_rm_publicipaddress: Misc typo fixes ([#698](https://github.com/ansible-collections/azure/pull/698)) + - azure_rm_virtualmachinescaleset: Misc typo fixes ([#698](https://github.com/ansible-collections/azure/pull/698)) + - azure_rm_appgateway: Update `state` document ([#674](https://github.com/ansible-collections/azure/pull/674)) + - azure_rm_dnsrecordset_info: Fixed error where recordset relative did not exist 
([#706](https://github.com/ansible-collections/azure/pull/706)) + - azure_rm_cosmosdbaccount_info: Correct cosmosdb info module when loading by resource group ([#709](https://github.com/ansible-collections/azure/pull/709)) + - azure_rm_notificationhub: Avoid the case when service returns None ([#718](https://github.com/ansible-collections/azure/pull/718)) + - azure_rm_notificationhub_info: Avoid the case when service returns None ([#718](https://github.com/ansible-collections/azure/pull/718)) + - azure_rm_common: common: Handle exception raised while loading profile ([#610](https://github.com/ansible-collections/azure/pull/610)) + - README.md: Clarify document for installing collection and dependencies ([#716](https://github.com/ansible-collections/azure/pull/716)) + - azure_rm_deployment: azure_rm_deployment : Fixed tags related bug ([#641](https://github.com/ansible-collections/azure/pull/641)) + - azure_rm_subnet: Dissociate routetable from subnet ([#727](https://github.com/ansible-collections/azure/pull/727)) + - azure_rm_securitygroup_info: Align `azure_rm_securitygroup_info` return to match `azure_rm_securitygroup` ([#726](https://github.com/ansible-collections/azure/pull/726)) + + +## v1.10.0 (2021-10-22) + +### NEW MODULES + - azure_rm_virtualmachinesize_info: VirtualMachineSize facts module ([#605](https://github.com/ansible-collections/azure/pull/605)) + - azure_rm_diskencryptionset: New module: azure_rm_diskencryptionset ([#552](https://github.com/ansible-collections/azure/pull/552)) + - azure_rm_diskencryptionset_info: New module: azure_rm_diskencryptionset ([#552](https://github.com/ansible-collections/azure/pull/552)) + +### FEATURE ENHANCEMENT + - azure_rm_availabilityset: Add ProximityPlacementGroup to azure_rm_availabilityset ([#612](https://github.com/ansible-collections/azure/pull/612)) + - main.yml: Update vmss test case ([#633](https://github.com/ansible-collections/azure/pull/633)) + - main.yml: Enable VMSS TEST 
([#634](https://github.com/ansible-collections/azure/pull/634)) + - azure_rm_keyvault: Add new parameter enable_purge_protection to azure_rm_keyvault ([#643](https://github.com/ansible-collections/azure/pull/643)) + - azure_rm_keyvault_info: Add new parameter enable_purge_protection to azure_rm_keyvault ([#643](https://github.com/ansible-collections/azure/pull/643)) + - azure_rm_containerinstance: Fixed issue #232 Added Volume mount support for container instances ([#338](https://github.com/ansible-collections/azure/pull/338)) + - azure_rm_containerinstance_info: Fixed issue #232 Added Volume mount support for container instances ([#338](https://github.com/ansible-collections/azure/pull/338)) + - ignore-2.13.txt: Copy ignore-2.12.txt to ignore-2.13.txt ([#642](https://github.com/ansible-collections/azure/pull/642)) + - azure_rm_mysqlserver: Add new parameter (azure_rm_mysqlserver.py)--- restarted ([#600](https://github.com/ansible-collections/azure/pull/600)) + +### BUG FIXING + - azure_rm_virtualmachineimage_info: Support to get the latest version of a virtual machine image ([#617](https://github.com/ansible-collections/azure/pull/617)) + - azure_rm_virtualmachine: azure_rm_virtualmachine: suppress no_log warning on ssh_password_enabled parameter ([#622](https://github.com/ansible-collections/azure/pull/622)) + - azure_rm_mysqlserver: Remove version 5.6, bump minimum version from 5.6 to 5.7 ([#626](https://github.com/ansible-collections/azure/pull/626)) + - azure_rm_manageddisk: Update azure_rm_manageddisk Doc to reflect return value ([#616]( https://github.com/ansible-collections/azure/pull/616)) + - azure_rm_managementgroup_info: bugfix for azure_rm_managementgroup_info module, subscriptions not detected as correct type ([#630](https://github.com/ansible-collections/azure/pull/630)) + - azure_rm_manageddisk: Fix manageddisk unmount documentation ([#649](https://github.com/ansible-collections/azure/pull/649)) + - azure_rm_securitygroup: Fix azure_rm_securitygroup 
doc ([#640](https://github.com/ansible-collections/azure/pull/640)) + + +## v1.9.0 (2021-08-23) + +### NEW MODULES + - azure_rm_ddosprotectionplan: New module: azure_rm_ddosprotectionplan ([#493](https://github.com/ansible-collections/azure/pull/493)) + - azure_rm_ddosprotectionplan_info: New module: azure_rm_ddosprotectionplan ([#493](https://github.com/ansible-collections/azure/pull/493)) + - azure_rm_privateendpoint: Azure rm privateendpoint ([#593](https://github.com/ansible-collections/azure/pull/593)) + - azure_rm_privateendpoint_info: Azure rm privateendpoint ([#593](https://github.com/ansible-collections/azure/pull/593)) + - azure_rm_webappaccessrestriction: New modules for webapp network access restrictions ([#594](https://github.com/ansible-collections/azure/pull/594)) + - azure_rm_webappaccessrestriction_info: New modules for webapp network access restrictions ([#594](https://github.com/ansible-collections/azure/pull/594)) + - azure_rm_webappvnetconnection: New modules for webapp vnet connection ([#590](https://github.com/ansible-collections/azure/pull/590)) + - azure_rm_webappvnetconnection_info: New modules for webapp vnet connection ([#590](https://github.com/ansible-collections/azure/pull/590)) + +### FEATURE ENHANCEMENT + - azure_rm_networkinterface: Allow IPv6 with NetworkInterfaceIPConfiguration ([#582](https://github.com/ansible-collections/azure/pull/582)) + - azure_rm_postgresqlserver: postgres server backup-support ([#566](https://github.com/ansible-collections/azure/pull/566)) + - azure_rm_virtualmachine: Addition of Spot instance support for VM and VMSS ([#559](https://github.com/ansible-collections/azure/pull/559)) + - azure_rm_virtualmachinescaleset: Addition of Spot instance support for VM and VMSS ([#559](https://github.com/ansible-collections/azure/pull/559)) + - azure_rm_appgateway: Add support for application gateway path-based routing ([#452](https://github.com/ansible-collections/azure/pull/452)) + - main.yml: Virtual machine test 
case update ([#595](https://github.com/ansible-collections/azure/pull/595)) + - azure_rm_appgateway: Allow application gateway probe to use host header from HTTP settings ([#450](https://github.com/ansible-collections/azure/pull/450)) + - azure_rm_*_info: Fixed dev branch sanity error ([#596](https://github.com/ansible-collections/azure/pull/596)) + +### BUG FIXING + - runtime.yml: Add runtime.yml ([#587](https://github.com/ansible-collections/azure/pull/587)) + - galaxy.yml: Add resource tags ([#592](https://github.com/ansible-collections/azure/pull/592)) + - CONTRIBUTING.md: Update contributing notes for dev/testing ([#574](https://github.com/ansible-collections/azure/pull/574)) + - main.yml: BUG FIX: Get latest VM image version ([#606](https://github.com/ansible-collections/azure/pull/606)) + + +## v1.8.0 (2021-08-02) + +### NEW MODULES + - azure_rm_notificationhub: New module: azure_rm_notificationhub ([#496](https://github.com/ansible-collections/azure/pull/496/)) + - azure_rm_notificationhub_info: New module: azure_rm_notificationhub ([#496](https://github.com/ansible-collections/azure/pull/496/)) + - azure_rm_expressroute: New module: azure_rm_expressroute ([#484](https://github.com/ansible-collections/azure/pull/484)) + - azure_rm_expressroute_info: New module: azure_rm_expressroute ([#484](https://github.com/ansible-collections/azure/pull/484)) + +### FEATURE ENHANCEMENT + - azure_rm_aks: azure_rm_aks: cluster client & models API version ([#497](https://github.com/ansible-collections/azure/pull/497)) + - azure_rm_aks: add new paramter node_labels for agent_pool ([#577](https://github.com/ansible-collections/azure/pull/577)) + - azure_rm_aks: azure_rm_aks: support system-assigned (managed) identity, ([#514](https://github.com/ansible-collections/azure/pull/514)) + - azure_rm_mysqlserver: Add new feature storage_profile ([#563](https://github.com/ansible-collections/azure/pull/563)) + +### BUG FIXING + - azure_rm_virtualmachine_info: Add name to return 
data_disks ([#565](https://github.com/ansible-collections/azure/pull/565)) + - azure_rm_loadbalancer: enable_floating_ip is for SQL AlwaysOn not SNAT ([#560](https://github.com/ansible-collections/azure/pull/560)) + - azure_rm_containerregistry: Add return value for azure_rm_containerregistry idempotent test ([#578](https://github.com/ansible-collections/azure/pull/578)) + - azure_rm_containerregistry_info: Add return value for azure_rm_containerregistry idempotent test ([#578](https://github.com/ansible-collections/azure/pull/578)) + - azure_rm_roleassignment: azure_rm_roleassignment bugfix ([#464](https://github.com/ansible-collections/azure/pull/464)) + - azure_rm_roleassignment_info: azure_rm_roleassignment bugfix ([#464](https://github.com/ansible-collections/azure/pull/464)) + - azure_rm_aks: Update test case ([#585](https://github.com/ansible-collections/azure/pull/585)) + - azure_rm_cosmosdbaccount: Update test case ([#585](https://github.com/ansible-collections/azure/pull/585)) + + +## v1.7.0 (2021-06-08) + +### NEW MODULES + - azure_rm_adapplication: New module: azure_rm_adapplication ([#215](https://github.com/ansible-collections/azure/pull/215)) + - azure_rm_adapplication_info: New module: azure_rm_adapplication ([#215](https://github.com/ansible-collections/azure/pull/215)) + - azure_rm_adgroup: New module: azure_rm_adgroup ([#423](https://github.com/ansible-collections/azure/pull/423)) + - azure_rm_adgroup_info: New module: azure_rm_adgroup ([#423](https://github.com/ansible-collections/azure/pull/423)) + - azure_rm_apimanagement: New Module [API Management] ([#322](https://github.com/ansible-collections/azure/pull/322)) + - azure_rm_apimanagement_info: New Module [API Management] ([#322](https://github.com/ansible-collections/azure/pull/322)) + - azure_rm_ipgroup: New module: azure_rm_ipgroup ([#528](https://github.com/ansible-collections/azure/pull/528)) + - azure_rm_ipgroup_info: New module: azure_rm_ipgroup 
([#528](https://github.com/ansible-collections/azure/pull/528)) + - azure_rm_eventhub: New module: azure_rm_eventhub ([#519](https://github.com/ansible-collections/azure/pull/519)) + - azure_rm_eventhub_info: New module: azure_rm_eventhub ([#519](https://github.com/ansible-collections/azure/pull/519)) + - azure_rm_proximityplacementgroup: New module: azure_rm_proximityplacementgroup ([#501](https://github.com/ansible-collections/azure/pull/501)) + - azure_rm_proximityplacementgroup_info: New module: azure_rm_proximityplacementgroup ([#501](https://github.com/ansible-collections/azure/pull/501)) + - azure_rm_privatednszonelink: New module: azure_rm_privatednszonelink ([#495](https://github.com/ansible-collections/azure/pull/495)) + - azure_rm_privatednszonelink_info: New module: azure_rm_privatednszonelink ([#495](https://github.com/ansible-collections/azure/pull/495)) + +### FEATURE ENHANCEMENT + - azure_rm_virtualmachine_info: Add availability zones to azure_rm_virtualmachine_info module ([#523](https://github.com/ansible-collections/azure/pull/523)) + - azure: Add log_mode and log_path to azure.py ([#540](https://github.com/ansible-collections/azure/pull/540)) + +### BUG FIXING + - ado: Optimizing ado.sh ([#510](https://github.com/ansible-collections/azure/pull/510)) + - azure_rm_securitygroup: azure_rm_securitygroup - idempotent when args are lists ([#507](https://github.com/ansible-collections/azure/pull/507)) + - azure_rm_openshiftmanagedcluster: Fix an issue identifying a creation/deletion error [(#542](https://github.com/ansible-collections/azure/pull/542)) + - azure_rm_adapplication: disable tlsv1_1 in app gateway test. 
([#544](https://github.com/ansible-collections/azure/pull/544)) + - pr-pipelines: increase integration testing timeout ([#549](https://github.com/ansible-collections/azure/pull/549)) + - tests/integration/targets/azure_rm_apimanagement/tasks/main.yml: Update sleep relate method ([#550](https://github.com/ansible-collections/azure/pull/550)) + - tests/integration/targets/azure_rm_appgateway/aliases: Disable azure_rm_appgateway relate test ([#558](https://github.com/ansible-collections/azure/pull/558)) + + +## v1.6.0 (2021-04-29) + +### NEW MODULES + - azure_rm_search: Add new module to deploy Azure Cognitive Search 'azure_rm_cognitivesearch' ([#372](https://github.com/ansible-collections/azure/pull/372)) + - azure_rm_search_info: Add new module to deploy Azure Cognitive Search 'azure_rm_cognitivesearch' ([#372](https://github.com/ansible-collections/azure/pull/372)) + - azure_rm_apimanagementservice: Added new module for Azure API management service. ([#333](https://github.com/ansible-collections/azure/pull/333#)) + - azure_rm_apimanagementservice_info: Added new module for Azure API management service. 
([#333](https://github.com/ansible-collections/azure/pull/333#)) + - azure_rm_virtualwan: Add new module relate with Virtual WAN ([#329](https://github.com/ansible-collections/azure/pull/329)) + - azure_rm_virtualwan_info: Add new module relate with Virtual WAN ([#329](https://github.com/ansible-collections/azure/pull/329)) + - azure_rm_vpnsite: Add new module relate with VPN site ([#328](https://github.com/ansible-collections/azure/pull/328)) + - azure_rm_vpnsite_info: Add new module relate with VPN site ([#328](https://github.com/ansible-collections/azure/pull/328)) + - azure_rm_vpnsitelink_info: Add new module relate with VPN site ([#328](https://github.com/ansible-collections/azure/pull/328)) + - azure_rm_aduser: Add new module for AD Users ([#402](https://github.com/ansible-collections/azure/pull/402)) + - azure_rm_aduser_info: Add new module for AD Users ([#402](https://github.com/ansible-collections/azure/pull/402)) + +### FEATURE ENHANCEMENT + - ignore-2.12: Add 2.11 to test matrix, add ignore-2.12.txt ([#480](https://github.com/ansible-collections/azure/pull/480)) + - azure_rm_appgateway: Support subnet lookup for app gateway ([#451](https://github.com/ansible-collections/azure/pull/451)) + - azure_rm_storageaccount: Update azure_rm_storageaccount relate test yml ([#488](https://github.com/ansible-collections/azure/pull/488)) + - pr-pipeline: use python3.8 as default version,and using ubuntu20. 
([#509](https://github.com/ansible-collections/azure/pull/509)) + +### BUG FIXING + - azure: Paultaiton 20210409 requirements doc ([#485](https://github.com/ansible-collections/azure/pull/485)) + - azure_rm_storageaccount: Allow storage account type Premium_ZRS for FileStorage and BlockBlobStorage ([#482](https://github.com/ansible-collections/azure/pull/482)) + - azure_rm_*: Fix sanity test related errors ([#506](https://github.com/ansible-collections/azure/pull/506)) + - azure_rm: Fixing sanity test issue for ansible 2.11 ([#511](http://fanyi.youdao.com/?keyfrom=dict2.index)) + - azure_rm: Fixing inventory issue ([#518](https://github.com/ansible-collections/azure/pull/518)) + - azure_rm_aduser: fixing update account_enabled bug in azure_rm_aduser.py ([#536](https://github.com/ansible-collections/azure/pull/536)) + - azure_rm_common: fixing ad related auth issue when using service principal. ([#537](https://github.com/ansible-collections/azure/pull/537)) + - azure_rm_aduser: change class name of azure_rm_aduser ([#538](https://github.com/ansible-collections/azure/pull/538)) + +## v1.5.0 (2021-03-26) + +### NEW MODULES + - azure_rm_aksupgrade_info: Add new module to get available upgrade versions for an AKS cluster ([#405](https://github.com/ansible-collections/azure/pull/405)) + - azure_rm_backuppolicy: Add new module to manage backup policies ([#373](https://github.com/ansible-collections/azure/pull/373)) + - azure_rm_backuppolicy_info: Add new module to manage backup policies ([#373](https://github.com/ansible-collections/azure/pull/373)) + - azure_rm_managementgroup_info: New module azure_rm_managementgroup_info ([#428](https://github.com/ansible-collections/azure/pull/428)) + - azure_rm_datalakestore: Add new module azure_rm_datalakestore ([#352](https://github.com/ansible-collections/azure/pull/352)) + - azure_rm_datalakestore_info: Add new module azure_rm_datalakestore ([#352](https://github.com/ansible-collections/azure/pull/352)) + +### FEATURE 
ENHANCEMENT + - azure_rm_aks: add creation and deletion of nodepools ([#440](https://github.com/ansible-collections/azure/pull/440)) + - azure_rm_loganalyticsworkspace: Add tags for azure_rm_loganalyticsworkspace ([#434](https://github.com/ansible-collections/azure/pull/434)) + - sanity-requirements-azure: Bump cryptography from 3.2 to 3.3.2 ([#424](https://github.com/ansible-collections/azure/pull/424)) + - azure_rm_keyvaultsecret: Conditionally call MSI auth when interacting with keyvault ([#356](https://github.com/ansible-collections/azure/pull/356)) + - azure_rm_keyvaultsecret_info: Conditionally call MSI auth when interacting with keyvault ([#356](https://github.com/ansible-collections/azure/pull/356)) + - azure_rm_keyvaultkey: Conditionally call MSI auth when interacting with keyvault ([#356](https://github.com/ansible-collections/azure/pull/356)) + - azure_rm_keyvaultkey_info: Conditionally call MSI auth when interacting with keyvault ([#356](https://github.com/ansible-collections/azure/pull/356)) + - azure_rm_keyvault: Set the default value of enable_soft_delete to true ([#463](https://github.com/ansible-collections/azure/pull/463)) + - azure_rm_keyvault_info: Set the default value of enable_soft_delete to true ([#463](https://github.com/ansible-collections/azure/pull/463)) + +### BUG FIXING + - azure_tags: Improve the documentation of tags ([#415](https://github.com/ansible-collections/azure/pull/415)) + - azure_rm_registrationassignment: fixed SyntaxWarning ([#427](https://github.com/ansible-collections/azure/pull/427)) + - azure_rm_adserviceprincipal: Update azure_rm_adserviceprincipal examples ([#414](https://github.com/ansible-collections/azure/pull/414)) + - azure_rm_keyvault_info: change description for access policies return value ([#426](https://github.com/ansible-collections/azure/pull/426)) + - azure_rm_*: modules: remove ANSIBLE_METADATA ([#436](https://github.com/ansible-collections/azure/pull/436)) + - azure_rm_backuppolicy: Update 
azure_rm_backupolicy add version ([#449](https://github.com/ansible-collections/azure/pull/449)) + - azure_rm_backuppolicy_info: Update azure_rm_backupolicy add version ([#449](https://github.com/ansible-collections/azure/pull/449)) + - azure_rm_image: Revert images API version ([#432](https://github.com/ansible-collections/azure/pull/432)) + - azure_rm_image_info: Revert images API version ([#432](https://github.com/ansible-collections/azure/pull/432)) + - azure_rm_openshiftmanagedcluster: resolve issue (#268) ([#307](https://github.com/ansible-collections/azure/pull/307)) + - azure_rm_virtualnetwork: Unrestrict the virtual network of multiple DNS servers when I(purge_a… ([#462](https://github.com/ansible-collections/azure/pull/462)) + - azure_rm_storageaccount: Correct doc for storageaccount network_acls options ([#456](https://github.com/ansible-collections/azure/pull/456)) + - azure_rm_storageaccount: Update azure_rm_storageaccount.py ([#458](https://github.com/ansible-collections/azure/pull/458)) + - azure_rm_datalakestore: Transfer azure_rm_datalakestore test group 10 ([#465](https://github.com/ansible-collections/azure/pull/465)) + - azure_rm_datalakestore: Delete datalake resource group after pipeline test ([#466](https://github.com/ansible-collections/azure/pull/466)) + + +## v1.4.0 (2021-01-26) + +### NEW MODULES + - azure_rm_route_info: add azure_rm_route_info module ([#334](https://github.com/ansible-collections/azure/pull/334)) + +### FEATURE ENHANCEMENT + - azure_rm_postgresqlserver: add storage_autogrow option to postgresqlserver ([#387](https://github.com/ansible-collections/azure/pull/387)) + - azure_rm_keyvaultsecret: add content type parameter to azure_rm_keyvaultsecret ([#317](https://github.com/ansible-collections/azure/pull/317)) + - azure_rm_keyvaultsecret_info: add content type parameter to azure_rm_keyvaultsecret ([#317](https://github.com/ansible-collections/azure/pull/317)) + - azure_rm_mysqlserver: add missing Mysql version 8.0 
([#319](https://github.com/ansible-collections/azure/pull/319)) + +### BUG FIXING + - Test_unit: add resource group for datalake store testing in ado pipeline ([#375](https://github.com/ansible-collections/azure/pull/375)) + - README.md: update README to include a link to documentation ([#376](https://github.com/ansible-collections/azure/pull/376)) + - azure_rm_deployment: update azure_rm_deployment document ([#384](https://github.com/ansible-collections/azure/pull/384)) + - azure_rm_azurefirewall: add support for tags in exec_module ([#360](https://github.com/ansible-collections/azure/pull/360)) + - Test_unit: disable generate VM using password for regression testing purpose ([#393](https://github.com/ansible-collections/azure/pull/393)) + - azure_rm_keyvaultsecret_info: Fix doc on returned field name ([#389](https://github.com/ansible-collections/azure/pull/389)) + - azure_rm_virtualnetworkpeering: azure_rm_virtualnetworkpeering: Fix unable to remove non-existing pee…([#400](https://github.com/ansible-collections/azure/pull/400)) + - azure_rm_loadbalancer: check mode for loadbalancer ([#316](https://github.com/ansible-collections/azure/pull/316)) + - azure_rm_backupazurevm: Add function that azure_rm_backupazurevm resource in different resour… ([#404](https://github.com/ansible-collections/azure/pull/404)) + + +## v1.3.1 (2020-12-17) + +### BUG FIXING + - CHANGELOG: Some corrections needed in links to get them to work ([#366](https://github.com/ansible-collections/azure/pull/366)) + - azure_rm: Retrieve computer_name using dictionary get method ([#368](https://github.com/ansible-collections/azure/pull/368)) + + +## v1.3.0 (2020-12-16) + +### NEW MODULES + - azure_rm_vmbackuppolicy: Azure Recovery Services VM Backup Policy ([#271](https://github.com/ansible-collections/azure/pull/271)) + - azure_rm_vmbackuppolicy_info: Azure Recovery Services VM Backup Policy Info ([#271](https://github.com/ansible-collections/azure/pull/271)) + - azure_rm_subscription_info: Azure 
rm subscription info ([#280](https://github.com/ansible-collections/azure/pull/280)) + - azure_rm_privatednsrecordset: add new module for supporting DNS recordset operations in Private DNS zone ([#286](https://github.com/ansible-collections/azure/pull/286)) + - azure_rm_registrationassignment: Registration Assignment for Azure Lighthouse ([#359](https://github.com/ansible-collections/azure/pull/359)) + - azure_rm_registrationassignment_info: Registraion Assignment Info for Azure Lightouse ([#359](https://github.com/ansible-collections/azure/pull/359)) + - azure_rm_registrationdefinition: Registration Definition for Azure Lighthouse ([#359](https://github.com/ansible-collections/azure/pull/359)) + - azure_rm_registrationdefinition_info: Registration Definition Info for Azure Lighthouse ([#359](https://github.com/ansible-collections/azure/pull/359)) + +### FEATURE ENHANCEMENT + - azure_rm_subnet: add delegations compatibility to azure_rm_subnet ([#264](https://github.com/ansible-collections/azure/pull/264)) + - azure_rm_loganalyticsworkspace: add force deletion capability to log analytics module ([#273](https://github.com/ansible-collections/azure/pull/273)) + - azure_rm_sqldatabase: add sku option for sqldatabase ([#291](https://github.com/ansible-collections/azure/pull/291)) + - azure_rm_aks: update azure_rm_aks document ([#294](https://github.com/ansible-collections/azure/pull/294)) + - azure_rm_manageddisk_info: add new parameter managed_by ([#302](https://github.com/ansible-collections/azure/pull/302)) + - Bump cryptography version from 3.0 to 3.2 ([#306](https://github.com/ansible-collections/azure/pull/306)) + - azure_rm_subnet: add example of service_endpoints configuration ([#309](https://github.com/ansible-collections/azure/pull/309)) + - azure_rm: add computer_name parameter to available variables ([#312](https://github.com/ansible-collections/azure/pull/312)) + - azure_rm_webapp: add support for multi-container apps to azure_rm_webapp 
([#257](https://github.com/ansible-collections/azure/pull/257)) + - azure_rm_virtualmachineextension: add no_log to protected_settings variable ([#278](https://github.com/ansible-collections/azure/pull/278)) + +### BUG FIXING + - azure_rm_keyvault: fix azure_rm_keyvault idempotency ([#295](https://github.com/ansible-collections/azure/pull/295)) + - azure_rm_roleassignment: fix azure_rm_roleassignment idempotence error ([#296](https://github.com/ansible-collections/azure/pull/296)) + - azure_rm_roleassignment: fix azure_rm_roleassignment related bugs ([#301](https://github.com/ansible-collections/azure/pull/301)) + - azure_rm_autoscale: fix typo ([#314](https://github.com/ansible-collections/azure/pull/314)) + - Fix sanity fail in python3.8 environment ([#355](https://github.com/ansible-collections/azure/pull/355)) + - azure_rm: extend doc fragment from base constructed class to fix error ([#364](https://github.com/ansible-collections/azure/pull/364)) + + +## v1.2.0 (2020-10-09) + +### NEW MODULES + - azure_rm_backupazurevm: ([#248](https://github.com/ansible-collections/azure/pull/248)) + - azure_rm_backupazurevm_info: ([#248](https://github.com/ansible-collections/azure/pull/248)) + - azure_rm_recoveryservicesvault: ([#254](https://github.com/ansible-collections/azure/pull/254)) + - azure_rm_openshiftmanagedcluster: ([#276](https://github.com/ansible-collections/azure/pull/276)) + +### FEATURE ENHANCEMENT + - add python 3.8 support ([#246](https://github.com/ansible-collections/azure/pull/246)) + - azure_rm_publicipaddress: support public Ipv6 address ([#125](https://github.com/ansible-collections/azure/pull/125)) + - azure_rm_subnet: add private-endpoint-network-policies ([#256](https://github.com/ansible-collections/azure/pull/256)) + - azure_rm: fetch availability zone info into hostvars ([#243](https://github.com/ansible-collections/azure/pull/243)) + - azure_rm: make inventory_hostname configurable with hostvar_expressions 
([#105](https://github.com/ansible-collections/azure/pull/105)) + +### BUG FIXING + - azure_rm_openshiftmanagedcluster: fix issue [#270](https://github.com/ansible-collections/azure/issues/270) and [#269](https://github.com/ansible-collections/azure/issues/269) + ([#285](https://github.com/ansible-collections/azure/pull/285)) + + +## v1.1.0 (2020-09-03) + +### FEATURE ENHANCEMENT + - azure_rm_storageaccount: allow blob public access parameter ([#219](https://github.com/ansible-collections/azure/pull/219)) + - azure_rm_virtualmachine: update boot diganostics config ([#208](https://github.com/ansible-collections/azure/pull/208)) + - azure_rm_aks: add load_balancer_sku option ([#199](https://github.com/ansible-collections/azure/pull/199)) + - azure_rm: improve OS detection when VM has no osProfile ([#197](https://github.com/ansible-collections/azure/pull/197)) + - azure_rm_subnet: support IPv6 address ([#240](https://github.com/ansible-collections/azure/pull/240)) + - azure_rm_networkinterface: add new module parameter address_prefixes ([#239](https://github.com/ansible-collections/azure/pull/239)) + - azure_rm_common: support azure-cli credentials with multiple subscriptions ([#195](https://github.com/ansible-collections/azure/pull/195)) + - azure_rm_mariadbserver: support version 10.3 ([#244](https://github.com/ansible-collections/azure/pull/244)) + +### BUG FIXING + - azure_rm_manageddisk: fix increments LUN on disks already attached error ([#237](https://github.com/ansible-collections/azure/pull/237)) + - azure_rm_appgateway: fix rule type reference error ([#99](https://github.com/ansible-collections/azure/pull/99)) + + +## v1.0.0 (2020-08-12) + +### FEATURE ENHANCEMENT + - azure_rm_appgateway: support version 2 SKUS ([#198](https://github.com/ansible-collections/azure/pull/198)) + - azure_rm_storageaccount: support minimum tls version ([#207](https://github.com/ansible-collections/azure/pull/207)) + +### BUG FIXING + - azure_rm_roledefinition: fails when 
`description` is set ([#214](https://github.com/ansible-collections/azure/pull/214)) + - azure_rm_virtualmachine: boot diagnostics related error ([#200](https://github.com/ansible-collections/azure/pull/200)) + + +## v0.3.0 (2020-07-24) + +### FEATURE ENHANCEMENT + - azure_rm_storageblob: add batch upload feature ([#203](https://github.com/ansible-collections/azure/pull/203)) + +### BUG FIXING + - azure_rm_deployment_info: getting the template_link when it does not exist ([#180](https://github.com/ansible-collections/azure/pull/180)) + - azure_rm_virtualmachine: protect against no diskSizeGB ([#185](https://github.com/ansible-collections/azure/pull/185)) + - azure_rm_deployment: misleading status code in module failure message ([#204](https://github.com/ansible-collections/azure/pull/204)) + - azure_rm_adserviceprincipal: invalid update check logic ([#205](https://github.com/ansible-collections/azure/pull/205)) + + +## v0.2.0 (2020-07-03) + +### NEW MODULES + - azure_rm_privatezone module ([#122](https://github.com/ansible-collections/azure/pull/122)) + - azure_rm_adserviceprincipal module ([#179](https://github.com/ansible-collections/azure/pull/179)) + - azure_rm_adserviceprincipal_info module ([#179](https://github.com/ansible-collections/azure/pull/179)) + - azure_rm_adpassword module ([#179](https://github.com/ansible-collections/azure/pull/179)) + - azure_rm_adpassword_info module ([#179](https://github.com/ansible-collections/azure/pull/179)) + +### FEATURE ENHANCEMENT + - add ability to remove all subnet service endpoints ([#148](https://github.com/ansible-collections/azure/pull/148)) + - update network client api version ([#157](https://github.com/ansible-collections/azure/pull/157)) + - add ephemeral os disk support for azure_rm_virualmachinescaleset ([#128](https://github.com/ansible-collections/azure/pull/128)) + - add ephemeral os disk support for azure_rm_virtualmachine ([#124](https://github.com/ansible-collections/azure/pull/124)) + - add 
FileEndpoint to azure_rm_storageaccount_info ([#102](https://github.com/ansible-collections/azure/pull/102)) + - add support for managing the 'Firewall and virtual networks' settings in azure_rm_storageaccount ([#108](https://github.com/ansible-collections/azure/pull/108)) + +### BUG FIXING + - bug fixing in azure_rm_aks ([#170](https://github.com/ansible-collections/azure/pull/170)) + - migrate missing doc_fragments that went missing ([#115](https://github.com/ansible-collections/azure/pull/115)) + +## v0.1.3 (2020-05-13) + +- add new parameter in azure_rm_aks +- fix retrun value docs in azure_rm_finctionapp and auzre_rm_functionapp_info +- change README.md and update CHANGELOG.md +- fix example in azure_rm_roledefinition_info +- add Icmp rule support in azure_rm_securitygroup +- add public_ip_per_vm parameter in azure_rm_virutalmachinescaleset +- add tags in azure_rm_galleryimageversion +- add sku type in azure_rm_virtualnetworkgateway +- add tags in azure_rm_containerregistry_info +- format azure_rm_managementgroup +- add new parameter in azure_rm_storageaccount +- fixes accesss policy update in azure_rm_keyvault + +## v0.1.2 (2020-03-19) + +- migrate exisiting azure modules from ansible core + +## v0.1.1 (2020-03-03) + +- add module azure_rm_managementgroup + +## v0.1.0 (2019-12-18) + +- Add inventory plugin + +## v0.0.2 (2019-11-15) + +- Remove deprecated content +- Fix galaxy.yml + +## v0.0.1 (2019-11-05) + +- Init release diff --git a/ansible_collections/azure/azcollection/CONTRIBUTING.md b/ansible_collections/azure/azcollection/CONTRIBUTING.md new file mode 100644 index 000000000..8358024a3 --- /dev/null +++ b/ansible_collections/azure/azcollection/CONTRIBUTING.md @@ -0,0 +1,68 @@ +# Contributing + +When contributing to this repository, please first discuss the change you wish to make via issue, or any other method with the owners of this repository before making a change. + +## Environment setup + +1. 
Prepare the Azure configuration file at `tests/integration/cloud-config-azure.ini`, a template of which is available in [the Ansible repo](https://github.com/ansible/ansible/blob/23a84902cb9599fe958a86e7a95520837964726a/test/lib/ansible_test/config/cloud-config-azure.ini.template). Populate your appropriate credential and resource group information. + - The account or service principal must have permission (typically Owner) on the resource groups. +1. Ensure the resource groups defined in your configuration file are already created. Recommended region: **East US** (not all regions support all Azure features). +1. Prepare testing directory (necessary until [ansible/ansible#68499](https://github.com/ansible/ansible/issues/68499) is resolved): + ```bash + git init ansible_collections + ``` +1. Unless you are running `ansible-test` inside a container (`--docker` flag), it is recommended you install Ansible and this repository's dependencies in a virtual environment: + ```bash + python3 -m venv venv + . venv/bin/activate + pip3 install -U pip + pip3 install ansible + pip3 install -r requirements-azure.txt + pip3 install -r sanity-requirements-azure.txt + ``` + +## Running tests + +1. Build/install the collection: + ```bash + rm -f azure-azcollection-*.tar.gz && ansible-galaxy collection build . --force && ansible-galaxy collection install azure-azcollection-*.tar.gz --force + ``` +1. Switch to the test environment directory where the collection installed: + ```bash + cd ansible_collections/azure/azcollection/ + ``` +1. Run tests for the desired module(s): + ```bash + ansible-test integration azure_rm_storageaccount --allow-destructive -v + ansible-test sanity azure_rm_storageaccount --color --junit -v + ``` + +Additional `ansible-test` resources: +* [Integration tests](https://docs.ansible.com/ansible/latest/dev_guide/testing_integration.html). +* [Testing Sanity](https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html). + +## Pull Request Process + +1. 
Fork this project into your account if you are a first-time contributor. +1. Create a branch based on the latest `dev` branch, commit your changes on this branch. +1. You may merge the Pull Request in once you have the sign-off of two other developers, or if you do not have permission to do that, you may request the second reviewer to merge it for you. + +## Tests / sanity checks + +1. Please provide integration tests showing the changed behavior/functionality under `tests/integration/targets//tasks`. +1. Think about updating the documentation and examples for the changed module. +1. Please run a sanity check. Install prerequisites `pip install -r sanity-requirements-azure.txt`, run with `ansible-test sanity --color -v --junit`. Read more at https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html. +1. There is a script `tests/utils/ado/ado.sh` for running tests inside an Azure DevOps pipeline. Unfortunately the pipeline and results are not visible for the public. You can perhaps adapt the parts of the script or use a small playbook to run the task list of the integration tests mentioned above. + +## Release Process + +1. Create a release branch from the target commit on dev branch. +1. Update version in [galaxy.yml](galaxy.yml) and release logs in [CHANGELOG.md](CHANGELOG.md). +1. Make sure the release branch is ready to release, merge the release branch into master branch. +1. Tag the master branch with new version number like `v*.*.*`, push to origin. +1. Release pipleline will automatically release the new version to galaxy. +1. Merge released changes back to `dev` branch. + +## Release status + +For each release details, you can refer to the [CHANGELOG](CHANGELOG.md) which contains the dates and significant changes in each minor release. 
diff --git a/ansible_collections/azure/azcollection/CredScanSuppressions.json b/ansible_collections/azure/azcollection/CredScanSuppressions.json new file mode 100644 index 000000000..c5a38bbc2 --- /dev/null +++ b/ansible_collections/azure/azcollection/CredScanSuppressions.json @@ -0,0 +1,17 @@ +{ + "tool": "Credential Scanner", + "suppressions": [ + { + "placeholder": "Password123!", + "_justification": "Mock secret used for tests and samples." + }, + { + "file": "cert1.txt", + "_justification": "Legitimate IT certificate file with private key" + }, + { + "file": "cert2.txt", + "_justification": "Legitimate IT certificate file with private key" + } + ] +} \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/FILES.json b/ansible_collections/azure/azcollection/FILES.json new file mode 100644 index 000000000..d314f7c22 --- /dev/null +++ b/ansible_collections/azure/azcollection/FILES.json @@ -0,0 +1,7215 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "requirements-azure.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d3ab574844946a3cb88cb12cac8a4380b0c7c8acb99bed574c5fb481f6d88c3", + "format": 1 + }, + { + "name": "shippable.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "515333579eea59619360d72e38cc2c5c9a8b43ff59cd3ddcc12c5b0172553b4a", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af114eb0e48183137f2fde23d6d77ea8970147a687c837e52a20cb78604b580e", + "format": 1 + }, + { + "name": "meta/execution-environment.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ed6d806d5b0456d5d6ab6e46d68bdbe7b46b10b4352a80ae8b8487220337742", + "format": 1 + }, + { + "name": ".gitignore", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "702d6332d579931561017161f2d96eb3b023a006c94ed262d789411d0f21bd26", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47923e21e1b1f0068efe3fed7da6302d7025459aef915dd0a2502d9645e993ef", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup/azure_keyvault_secret.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b5657c1cee7e677d0c6bbb107978a38267d1e07d9b050fad19cbec413b41adea", + "format": 1 + }, + { + "name": "plugins/doc_fragments", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/doc_fragments/azure_tags.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8c9d720dbc7605d3ee66799e8e81d0886e404c9a07b6b9b8edc844e0646de64", + "format": 1 + }, + { + "name": "plugins/doc_fragments/azure_rm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e19514cc0040e3b896fe05729dc6d5c5bc22698aff5bfa12d85e5195fb019634", + "format": 1 + }, + { + "name": "plugins/doc_fragments/azure.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57355ef9f93df996e93a187c7625073757b8a33df701383f756b3829457adae9", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/azure_rm_common_ext.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4acbc55dc634fdeb3e4c055b444523acf354e6709b958596050f163568592baa", + "format": 1 + }, + { + "name": "plugins/module_utils/azure_rm_common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "606edcc9cca3e648db7d5c88e7561e8d4148c3cc523b9ad2142eac5804a81126", + 
"format": 1 + }, + { + "name": "plugins/module_utils/azure_rm_common_rest.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53ae072d4b4c3943049b405b0872a222a28c29f4f64ef430b120313c25d0ea51", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqlserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac36eabbde56c6ed8d32e1e708e86e0b02a6ac85ba8a1069b3cf2b91c7c1f6ee", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_datafactory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1da36c4fd0cd07a25878ed7859039c2ddd6597bcfa01720a68347492dd101831", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_rediscache.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "557b83629f97b2b2307c641483cd158803a28659e5f8efd40aa25fea37c462e4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_deployment_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "292c3a0f562b370b6f572a5d5b03e0ba82fd9609406b48a75e9d2d2b13b9f130", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_dnszone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f825ef2bc97fa1d8bee35f4180d873600af36063458d9ad901dc4a3c660de504", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_keyvaultsecret.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04d1a312ebdb537b49cf8fa68d4a3893f4d3d0bb8a7a820a3fc44f3c4e75286b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_recoveryservicesvault_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbb41c71996147f8d3d87cae800d7e7282cac0c22344b0b24b1c5f264a73ac7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbfirewallrule_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"01735c5b13076bf7addd1b0252a7cbfd13393cf9935fe8bcabe365781959d25e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualnetworkpeering.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1205bd5e4b0cf149badbf1548eb8053df46be138e94b5b83c9676087c15ba522", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_availabilityset_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c464064706e7c2e53445766acdd4a670d2b68a9b2b2ddf0ba4b9c5879dea325d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualhubconnection.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7360b65211c92c73677c4953ba206ecb13312762c2c620a57cff6333e0c2733f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabenvironment_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92878bbd684969558369a571b7fd9ade1db9cdbd9ad89547d3f89372bcdf05e5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_appserviceplan_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4f0fc5c72974480e9bede435bbfc0648a4ce37fc33cd65388fe90dd9cb19fec", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistryreplication.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "477a310f9303dc7b696ccb724f77bd234b2c45ee4bfcbe2ca8f4cda4f1889822", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabcustomimage_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebc5d1ef44b5f012c125081034908adecfa3ecf8cfee882942e824bbaf51da8a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqlconfiguration_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c619b88c383da85e76bfbbeabf16bcb06d2f0e9e0d392fa08a6ae508b3de9d4f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"56db10cbaef59fc0c443d9bd86bf5372802072cf5732c8066c7d218214c971b2", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_monitordiagnosticsetting.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bf14614ea9745167d68b040bc2dae3e39d7f3571c4ac8300ee84cbf85384a5e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_bastionhost.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ec272790bd6b94d9dfbe0dc462248c0ecc0b79369624d15c03225f36dce8ee0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aksagentpool_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29944645e694dc258752791bd4f12db6b342c03c349ab4fd02be4ecec3685024", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adgroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd86c61a597aebb1e178931d6aa1fe52d237033e958179e24c40bc8298612815", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistrywebhook_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d83d00c3da287e1ed154c87cc0f5781827c67b9406d16eeed8708730a12a176", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_natgateway_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "021ebecacb738ca5dee8232ea800dc00df78d927690a63f4e0aacc84318ece9c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_deployment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cf58936df2b1a3df183b6bd855272e83987692fb2cad8209b285a1ee4c4f426", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_appserviceplan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c505925c19a228a7d632cdbc5e63dc1bbb25e9bac1d960f7906b55756fa32a25", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privateendpoint_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5af5bd1029cb66bed1438a26af63abd3cd77b3ae8bab5618dac3b491dcc59d47", + 
"format": 1 + }, + { + "name": "plugins/modules/azure_rm_webappvnetconnection_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29e0729f1439028f37a3c8801228728f91f99ce08194832d3981b0a7fc2f5758", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privateendpointconnection.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ecc4f6823fed8b227500c7714851dcc9f398424d5774bf89f8dafca9afcbef3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_dnszone_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05c9187df1957f9d3d1d748ebea17dd270c50a741846964e94cffef1f6948882", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_servicebusqueue.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0424ba6eb6ef76217410a0d8378302b466fe5b8c37be6d9081fa6a60dca57919", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aks.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea35fb2d395c703446cfe8386af7cdd580126028f89ee7d1900c6e97b3f90f3a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbfirewallrule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "002b28f29a420fc52cfa882aaf00aada340c67bca5951f33702899dfaba2bbb6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_lock_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a21e077cc5c7a85b46359b6bd6a27f6ca81bc3f2b5c632559369c1ab784d998a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_dnsrecordset_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce13d83217e276a01bc5763cdfbd131090b35c64607738dc052b7ef9d25a3068", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqlfirewallrule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0dfc21fcc71ef8a64894af0c62a50dcb6deb17582a7f35bddc79dde51fc8576f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_webapp.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cee92a7e39dcb163bec1db9966454602f7bbc0f1ea102480949ce7c4fddf2681", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_autoscale_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e14312a922f92c6ef0937f020aa7511ff9fd2759567c165796e257e65201b73", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cosmosdbaccount_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b19072e8ae6d606c0edab3e407d2a405d21ebb8f3d614ef93beee02422037089", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabenvironment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0fb63c466f07c95c6f05e5ff2d66320e5b37e8f1e3fa316ffe36a8428d9d1f5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlelasticpool_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7a80a678294c636c3dd4bc5dbf9b7955a4d1aaaa40757b2a24922072e71543b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cdnendpoint_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46e3f5c925eec47e7246827a356caa6c03547e647186bb3b6aeb7b37a5099ceb", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_vmbackuppolicy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44f49f470f1364e294d105ada7d43fd907e0988415a42bc6fc778617ba752b5c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabpolicy_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42a4c839bc9edacc106ea98025e391bfb2be640ff453a23fa915bf5483c8ac1e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_storageshare_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c30615415440ade0b39196ccd619a923cfce2162548c18c7edaec635f46e84e3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_hdinsightcluster.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "1f9b621f938b0570eb3e9515725997808d6f2309ade3bd61aec462f27d0b31de", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_openshiftmanagedcluster_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6ff2314a47bf072921a74773ad167ef9061dc5ad8ad29c54583f6e629ac285da", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_subnet_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "696339e48df4d1c0f6ea50fb2036f43dba92ad6097e102889d85c0ad9b764652", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_iothub_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf4cfbd73db4d003a322d06112a652991e56643689faf5c4ceb3c621a41e4d96", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_resourcegroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de3d87159dc3b1dfc118bddad95a4666bbb767b13ebd793b6dfce8016b19348b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_resource.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e6a719d9c2c10e6b8099968b9cee9dc4a33d70610e7166fbe7d9b2900cc63bf", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbconfiguration.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6d8d79de6e81076cad897a9b1b12a52462d4bf15505616fdf3d170c21dc2ab42", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_vpnsite_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78e052905dc10bcdba43971fba9600830c127b9255bd59c17d8b948e07385adc", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_notificationhub.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "542b060b5a8b5a434eb16d3258a235f58c1af7480557ef0269e98f5008d5d5ba", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_apimanagementservice_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c77402fee55cfb0fe5e7b24475dbb16dd8933bbf98a9787077d166883bd6006d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_roledefinition_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40f784387f3973478e4eca1c3dc95d03b607684eec3a503d9539ff912bdb065a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqldatabase_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f61d5d8e5ab4fe38d062a4be11e6da2e7ba8f9570cbd57da31f9b5d4f7c20035", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_route_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38e2e790759c8cd148397c1562b5c2d11d77e602c976706ca6ec47ad5ac82e7b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbdatabase_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2beced9a1e5e206b51add106ec935f93d11a382fc1cfb02cde4653c8c0e8e28", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqlserver_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9430b33e1b8689c451779a9e510c97f48e65a13e85d252b123a0fa367d2bf68c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_resource_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b95ecf5c88da9be4bb9b5de2a69a5c9e012e5fd7e78a8b7ed4683c098386fe94", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlfirewallrule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df437f8d158a6439ca3ce9f0bf9521abf56deda973797bc43519756d135854c5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabartifactsource_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb0421601c84d90ecc996ead0d3545e82167ea8f836abc75e2c2724763e64c08", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_gallery_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e358d5d80aba5329f4029f1e4fe71ac7265c6af33b18b5e3cc7f43acfda35f93", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualwan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "649f6ca93b8c5c39177032a048eb69ea2861811c04b2f88fd26ecd6ff7f9a7f4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlab.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f13d634939a9b6ede4232809a91a686954248c9358bffb9397bb1a46af9790a0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_image_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6dfd163560a3389f0d12d364e61a5a6e6bb87f50b2fc08965b19285002b600d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_automationaccount_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d58220fca342e7d1bc2facc36fe1350609a1bf36f53f0a517a6057909e18355e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aksagentpoolversion_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea88bd02b62e542b456b46676784f196500e628d747102b3fe4738b2b972d992", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_networkinterface_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3304875d8899cb9e33425372bf19d0e880979a4c703b4a8d1ca688896084d64d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachineextension_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4462ee68f8753800127b29f8cb26e4542425a39c5d9a4eeff9ca34c98a01d52b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_loganalyticsworkspace.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3eaaf717a8dcfef31dafccea334e863980d7f5225f83a7f65499998c2aa7eef", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatednszonelink.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a206192aa45d1f30ad91a779cb0934c49fc0c01f7da18a07aebdecd0b4333949", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_ipgroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2bf42d8b4c828a40e8ed0aacd42d2bce3a128e71d1d0a537081e204d761a8d0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabvirtualnetwork_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5d5b78feb99ae118bef399a1f39c9689c293d1b9ef5a120c0f7d29bdad9c871", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_apimanagement_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92edd852dad3cdba63ed9ce78fb0891f509dccb1e1e21acd73547e681962f6e8", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_vpnsitelink_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2fa6b67b6163684b312a9af9cfe1d92364ec7f2c3b240e249d54df344a08754", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adpassword_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf9a32a1b8674d0bce0b22c0a3ca953195ac6b83bd6e6ae6096d83dbeea19f39", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_diskencryptionset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad75019b7e2c48efb740547e6b8664dc76dbc7f46b5ae4f1307dc4a1a633a87b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_functionapp.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59b5ae364fd065ec8a59fd413934c477d21565748f11e588bc3ffb94431d6333", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_storageblob.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ade223098ab192a1b165695596036b1f9f0b3871845b99f77e2093eca24a76c9", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_monitordiagnosticsetting_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"7b04ea29c1795db69f2e12e716e8e09d8ff83fc05bf93a010926ef044cced8a9", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_servicebus_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ddc707b76677db9631536f8d7e796e65cbc03347fb9b2206fffdf01733729fb", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabvirtualmachine_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4746334e6c0767e153ded7c89b67fa2ad6b341d16d600b8451cdc0ec23ab3b47", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_azurefirewall.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b237d06a26a381ece50abb9a390525e2ac8d41216e47db64c6515f2a7cf2191f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualnetwork.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "154db13a7f90bcb954624802c8999a0f2b636c79b185656470fe441e3d050d74", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_managementgroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ac0238eb048983f35dea0b775c89623556249eedae511a6d4583c9d09c6ebf3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatednsrecordset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7e50777a183ab50fdeb35947617727032f293979436bc36c611f4138d576173", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_webappaccessrestriction.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bdb472e0f09598e62af2730b39bd2684d141d38ae41837f132c27e8a39d626ae", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinescalesetextension.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68f54cfbc3dee06ba295677701af2e25a718fc258ae6d832c77de8ede0f6086f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabvirtualnetwork.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"6fa7a70d833ea55daa428b6881a741c0deed4a0b82ae167a5d6802199f0eace6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4121ca062182f651d89c0729f34a4724a39520549c32f63231c74ec7b66a4962", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachine_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89ab6732833bf9bbbc639542612449946bd59db1385a79e368de8e47aa9371f0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_proximityplacementgroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b509d24b53d7895f676c980e139dd5e463bcb43cb2f803b9c4259b397e6887a7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_galleryimage.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb81199490da37abf78eaafb86dab3847c06d3cf636ec92264461ca6d36ec43f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_apimanagement.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "15b454fcf0d36fa5e33dd0c364288a8d4b775e0d033eb29c73c980c811cfe86b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privateendpointdnszonegroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bce497ed8f0fe499466bbd20c1acc772dcd458828cca8fba6efda9db9ff8fcd", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_bastionhost_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dcdf18934767abd5f6912ce0bb9cbbff6954c79fab3998fdf9ea3c75f5f9d604", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_storageshare.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82210aa73136b890cdd41a098e28368d64b17744e33d8220c3b240645ceeb461", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqldatabase_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"5f14bd4d988a83adb45c7efd192d78ace3049f8a114fb13887e04bd5d2810993", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinescaleset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "317ae1eb91fb3d44053c307a8c48dee56f20f254801f496fe0e994a40dd5e9a4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_publicipaddress.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a74b3d36674d81e191c97c89b7a62af410e11bffa6584324f5fcd09af93eff63", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistrywebhook.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a51de04003a696a7787d1d50feab809d4dcaea7d949410fd7a714fc19323ebd", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_servicebustopic.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7384a10983b632462a263c9a1955a37a241df62d6ff99f5673ddf81bb4d597e8", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_networkinterface.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f35a9acfd351c7634c475d7de186ad6347f1daa3e412937554ba1a21d5f5a395", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_natgateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4bf1206a802dca51ce3aebd308aa2d1be7d5c92ab7cc9c1627f24e7129c9a43a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_backuppolicy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "450276cb63df1322157d6f7598a4008100b39db42e46284fec965ae6d889e5c0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_securitygroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e86da6e0083e46c3ce35b5ad9cce5be81e674f58adb88ad34e25fc450ae5f0e6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqlfirewallrule_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e65ac200019da53ae0420ca181f815b1bc4164382312907a8f3738b57e1113c8", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinescalesetextension_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a3d909b1b73e9779524abaea50c0b7c0585e51449eb419e2b2655061d1d5e01", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_galleryimageversion_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a516518ebf35ff2ca1033e9b2e5ceb2b2280841a163a1f6914ab9bfe32238e15", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqlconfiguration.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88f2607f46f15e8da9955e0c887ac6d238dd1970f7e99ad48ff82679add2717a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_appgateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81616d62785ad1b0c868a6cb8fc6135388e188b8032d9370cfae013f9d0245b5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_registrationassignment_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d9177184a680209b8e56af0dac845509316784cda76b7825c8a8f80c9e5556d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualnetwork_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "487edc7987098e0c1f5d053a71f9ce405f5fb48721e39eca4c3078e948a8fcc5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_automationaccount.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dd5fd70d816d33733de4bd4eb9f7569038760a5d13f9f932d7cc94b8077bbf4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_loadbalancer_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3695020309683c704c9372d512e39f2bf9f831d31f4ca6ba49a8acc65037526", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinesize_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"58a1ba100e397ef2512a9de6cbcac15175fe5f5beba9a6a785e3f4485583bf18", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_webappaccessrestriction_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35ba519123a781331e81e7d80a138c74b2a201f1a8834cb0c02720eec69e5869", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_webappvnetconnection.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "80a7052dc658e5ce741d56bc35239816efca9288d3d7a05ab432153cec23fe04", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_recoveryservicesvault.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0418507db2bfd0fdcf67872ee7dce7a760b512aaaacaec837db424f77fbc8d42", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistry.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cc2c27f4472a39809871236626b260bd63d83f25ad2252d6cdfcf198568cd59", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adapplication_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2290d31890d77182a5299231bea9ea69420c330cd020361041ffb4b7825c9dbb", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adpassword.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae544ae7af099828e9a7b584903a61b10e4fe50c7222573db643f10c755e455d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_autoscale.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8ebe2e04d994d192a13e01b6ca30ab1bd8f2fd071ff441290e5bc113aa86b96", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privateendpointdnszonegroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70582f1fa2c40f77e166f00242bd6bff2e7c018368beaa2ac960328c341faa77", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aduser_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"38b7483a49e73fde79815078b47f00b7fc062eb5e91c660715141800aacbc227", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_vmbackuppolicy_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2cf17b31cc74838e9ca66d203999d3254cfaf6c602dab7c22101952e15cede3f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adserviceprincipal_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60c7a8360cef3b04f1637660dbd49d92384896e8b8388d6e51e1b2cb9323f1f7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_ddosprotectionplan_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bf47ad3f075bd81eafd9a3b7f2a18e71c267d0dd34e971f64a38c3afc572cf5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_hostgroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05d3069e290c810fe454923e22f4aae4932aa8924b12d872039904d87b779a07", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_backuppolicy_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df3f35c4d1d82e9d036782bd83fb11b8f761a706356360a540e44a36b5d1ff5b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_storageaccount.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c194d6385c64558c4fc8c0ec572764df5bac7dbe60774a2ef60bd8b9a46063d1", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_expressroute_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3dc3b73518d56b39abe77177143426a27ed1605699ba751395655df469f3cf02", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualhubconnection_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c73e7278360297a3f1f3a33404c20b2bc84d264dfe69e78a7dbefb94cb866c52", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistry_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"088eb49fc6d399e81acfd21ef6cedf92455caec49bf3273153ba0b6316da136d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_monitorlogprofile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df245b70ab6d3d354e89cdf92d311e7d94f0b28a4bc0a9399712994f31b8ef68", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_rediscachefirewallrule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47fb0d6e84f698478cbd319c6d058f7737b553be61896534818934ca9b3088a6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_trafficmanagerprofile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65176590401d92a76ec626a551e4c464d09b01299bbd476abdf591424b8a0e68", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualhub.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e49a0de4973e1c91d190383b5c37961937eedb22592a11362f699e0b4453151", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_managementgroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba1f6df6a9e04361433214fd6117b34140c394af1e9e7a6888f91f8b762487a7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_galleryimageversion.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8e574bcc037636d6ba0f767831c661aeb8185b8bc6967e83f73c1aa3452187a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cdnprofile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c49948c21437f22861fc3df155a4f60156800366ce1f5d39c58ba9125e08b68", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_iotdevicemodule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a15152a01225166acf0aa2cdf8776c151fd3ac7d765da506cdd99bd9dd79d60", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualnetworkgateway.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"70fb310b8119486250298eb4b0583bd82dbb5d14a6a6c52208f68057777d93fe", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7438a302d8de7f60006d5d0e79faaf574f630c9eba9f5126d6c9667d3cfa728", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aksagentpool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46507ba41eea54bc1041cce2026c1c91e74bee0a1c03396be763cae6477983bf", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_loganalyticsworkspace_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ff6f2ee119001765d22ed1f643f9d4a8604b7c746d82904608cd35e256a6fc3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatednsrecordset_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca64fe771c93ee29952634b1acd63d8ae35c0f36b4ef3a33f6ac1ef0994e2e42", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_datafactory_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33600c4c03848c4e536ae6066cc029732a563d7810c61eb06366715f728b8140", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbdatabase.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68b40425e3e53edfd9c674dc3d3ecb6ce9acf6b38fd1ddcfc12f63d3f87e43e1", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_loadbalancer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "498c78e74e6493af9b701f69cb56fa94ae3b64eddbbb507ca6e1ace8d6f56fb7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f2f2bd865bfb2131db7541e5a9628d1b54c6e53e96cdbf1618f21697aba8377", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlmanagedinstance.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "370f538327f02e8e9fd308de4d4489a883c54d3a8a4806867109fdd8dd9e6c2b", + 
"format": 1 + }, + { + "name": "plugins/modules/azure_rm_roleassignment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2cecb9f769039249c55fd4f02deda468edc587d0e63d9a9f32f4c1817289416", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_apimanagementservice.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89c9a6c332a40377c7657bef3e33fb90b87c8d88839fb6e2192cee39e2fb6a83", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualwan_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4542ee838ae409b161fee050aa55a071afa71a294f2c1128b615c5eeb1241941", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqlserver.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd3e6f09aa6ead8a155f1e6630a6bcf53e15c342a21b57803e7f12a15438db22", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_automationrunbook.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3cb4caef7658d8d7dab697bb4659e96ca52fd32ae8a20588d208d2d06bccccf", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_iotdevice.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05dcd59d9fd81f9c45b7549b0c4e1c108dad9cf12ccafdb8df322a5f8c4ad716", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabartifact_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f474b7c34cced12411188bf6b1621376c5ccc4c5b752cff100db6ac5ee1dd20", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqldatabase.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83531748d065461c880b055df18853d233c48197d6e4d3dc0075878fab5f788b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_firewallpolicy_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7640f87ea3490451f86cd6552187bf7d898ac14dabfd396c9d7a05c7e3927ff5", + "format": 1 + }, + { + "name": 
"plugins/modules/azure_rm_webapp_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68a7809631f30219be67fb49904890dedf33bbf0abc1c543c0c358cde36e9e6d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_webappslot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3f12d2434943e10d2eb06054014e166e5568d746c169c8fff4486aa043cf9b8", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cognitivesearch_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff6241aced96abf6d228204778b98af2ca39d0d800b078a27d47164084c3c9f6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqldatabase_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f6afeeb88694de978ea2ed8b21a25077459c6753b9cbbfcd64a2aaf1ae2fb3a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_galleryimage_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de802be0f7bc024b6e899656e36314bab3ff8c45518fe700a9fe28b92e2ca12e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_diskencryptionset_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "586109c40b10714b5280368db2802fca8d4fc87f797e2b0d5084bb5e7a4df3df", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aksupgrade_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db9ca4e8e6a327bc77383fef268e81a9d70bdf3b41c4d031bd5bd5bbb9a7d325", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlserver_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "297e7c9a749738d8faec1588f18ffe87113b808e50efd5216aa1f338f5360b0c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_dnsrecordset.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "024875c538f604b6cd017696843d9680d15e9ad2940608542ba4632ed59cacee", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_backupazurevm.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "b773c5bd5339e9b95f521bae7cee3eaa929e72b10fa71494bb33dee1f11196e6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_keyvault.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f41b7403d3c04cad0a262ed370792c11ec3a75f155b08d5c357226e4ede0fe6d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatelinkservice.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48c03bb5c957cf1e46d42843c7af662c781be6876007def2dc4cde7709385e89", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cosmosdbaccount.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ada84e6e50a2662bb66c041e69706aceba5bc0f237f308bdbaf4d5d53e6cb0fe", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_proximityplacementgroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e049bd091a4b88b2d68b6462d3cc15d740fcc65f5d3e310496513be40a83661", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_applicationsecuritygroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b71a4b975ba9c59ecc4cf8199d3008f0bf7c73e27b3a7b6c9a43c59a41d28236", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqldatabase.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86a0055ee5af7615250de7b9215e0e96e1c1f37d3900155f4cbd48b2a540f70a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_hdinsightcluster_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3397f10435940d52ec784f6f4c3b3d379e7eebd91e1179fb8509d73250bf8d0b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_servicebussaspolicy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3729f4dba8215497e263dbf466a1c3444469ced9acaeb80fbab328806306036b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinescaleset_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b8327ec79e23b105eb3bfe8f627e8de5319e3e6641aa538f4eb9518592286b51", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatednszone_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff7ef289202833ebfbe72ed58a26a8a2673d797bb47fd4c0a816557a7f58b769", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cognitivesearch.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9bd5c1af6a336f864f15757a9d66acbe033f3fbcb8a84e8c10e9b1ecfde60b7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_iothubconsumergroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6165d86ce42742d545d0a93a693f476fe4f1354d37ada4626f4bc1f2c2341665", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_backupazurevm_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a9c7973369978cf2a1d7ea9f909f3bc24b486134d5efc67db3f3f2b6d258e49", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privateendpoint.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9cf71bc67a11a058b6eac5f7c06c3a003ecce968f6fe09285f43dbd81f91fdf9", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privateendpointconnection_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a1c9260c99294e6c9f4579310504bf656d46b2ed16d22188859945cc53563fc0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aduser.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbc3bc935e68d08396690815c7ae2902df26ba76b3dff86c46068dcb339af380", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqlfirewallrule_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c208e21075ed318d02d1929647894adb0ea5e6a509e5c888ba21eb6fd9fbdc1", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_multiplemanageddisks.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ab5536fb372cd70f6a76146dc91981735af9ae402734660b6ebd2b638320cd69", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistrytag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b6a6b029d0bff63cc6fd9cbbbc92f44337fd23d460747b942a4caf32731ad05", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_keyvaultkey.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0efee578ee17f9b39c27f82c1b12bbfe5b29326fb20f30cd9566ffc0a0d6fc3e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabschedule_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "725084fb31a59725da35e985c4cb6b5a6a1c26e0a5b1e0469a9b1e195d25714e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqlfirewallrule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6c2c640d0f0df2b35c5d64b8a6ac089a5256d169e148cab7ec5f591c2501803", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistrytag_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99cde6ea4e4b3f5216131583a7f2ce0dc1777e836d93089733da276895c65b44", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_openshiftmanagedcluster.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4581968e7d86e0feefaebe6db9719b30ee724b3749d712a83fa74a388cdbff3e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_storageaccount_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9f50809d29dce63792c856534ea339a448945bc2557140e4285ce1d1eedbbd9", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_vpnsite.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23ec84e9336fb715e9d733c1c7351f7692215baa5c974d4ea2d191da59ed593d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabvirtualmachine.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"fd4ea797fdb70f8f7524c53b112211229261b569cdf11e3d6ca86e6dad84ceb4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqlconfiguration_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21ca71cd2afd400b71d7490ca240db6c936f8ff230b45d55cffe7374ab3dfa29", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adserviceprincipal.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b6b3b609ecb52a78f5b50c825242781f62232e2022801a3bc40aadc3dc7501b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_publicipaddress_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d34ab141f819446533effde97d356adae01af9f17c77687981a41161606ee5f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_manageddisk_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "42d1fcd7fddb4c8d491dd0929ca0d4baa30380c575eef40ccd96858ead58a30e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_firewallpolicy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "781d2da4df538c4d7af6ce544649bd0f706cdb59f65a91f4178a68d1f361cba5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_resourcegroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a403c77e2ac4037370e7ec2591eb54412be483728ade4e9c4555509cc5d9cb79", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_rediscache_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c674ba80adc3248e6bca9f65fe8be32ddd8598815b7872d76223ab6d0c31347", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_automationrunbook_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ceb0f3fa7498a033ae3e518cb6ea458fe86021b254dfc79347296051215c2c6c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_trafficmanager.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"415d86c6d0faa2177a8d620e0dda1d0e7638b37d9ebe081ee0ccdf171c0bae2a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_trafficmanagerendpoint.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dad1b818859460f2f31fac35c20b6404b27e53c7ce8ca48b8eb7f0b4a1d35d63", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_routetable.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "146505731614ef21002261069557950a8ecdb59de26851815e80a8efe57ad36c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aks_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c33473a8a593fdc663efae33798ba557973a9fdd7bd13f6f3d28018cd4e583b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_azurefirewall_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd9f899b87d4263474f2e16873be3e7bb864aaf883592ffc766d1f825c9157ee", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_datalakestore_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a20c6cea4fc464dbfc27684921cad74f406cc623478cd6ca2ecd9037ed3d1cd", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_iotdevice_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44ef66db0ee8e5c2241705d852f1da9a0de914890af18dd863dd1ffc1aeb9ea6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabarmtemplate_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68f3bbf22df1b1932233c2d83aaaf48cb9bc8d6ae951b122098a74815d936f19", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatednszonelink_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93d488b4238c4631765dff43c9a6978f3bef9d8ff35294dba4f34772a85a4284", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualnetworkpeering_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"262200fce2b7ed028d68c9d6aefb9961cd27b201a127e0a172d0d38f6cb2320a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_ipgroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "49b6efb5ffd80fc37d17c320869411ecdc589b2bc857d9584fd273b5ee2f818c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_eventhub_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b233adc26c96f491ff1fca40bf7ed4b4348d7b7aee123b36e15f3be74e048b4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_gallery.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f15cc6e7b24ad0093a915c0bc91a781adf80163b81739b3c81634812f065cf0", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_servicebustopicsubscription.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "944ad33c591b909af10403f521bde04c898b571194ef607e18cf6834c13c1d3f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_servicebus.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5ab60ebf5c75e9ad2ce94633126aec9e0cd514ba2f0906a92ab607645c96a25", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_subscription_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c9554454fef7600e1befca0a7b88f202e142ab7fd7727ff327df3e4bb0829fe", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_routetable_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da7f7d8eaa544f515c76fa513b984d641f362d8c66b8068939854e7825ec517c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqlconfiguration.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d692d9cc4732b17e1ef96257af505685ecd5389d1fe12abb5c70121e481260c5", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_lock.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "858b4a4357dd80fbefb9c62265d91f31019794c9077526f6703c95b5d7c077a3", + "format": 1 + }, + { + 
"name": "plugins/modules/azure_rm_subnet.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2fb831b234a530cf52f264eb7e98857ab06b96085cbc706a12d5b20366e0632", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlmanagedinstance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b818ebcae8e63d025d9c53bb531a77fc656b13e14e6466cfd555811060668797", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_hostgroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9b45a4241eeae89e588a7353afa1921b1b867ec547345212ee54033291cfbf8", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_ddosprotectionplan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9792e817aee295bad9eb0c3a18830487a05083a13e214f638b2f15d8461a88b3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_expressroute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b59f1aef6819e805e56ec5ae5a56730b09b6fa2efd80391446e11edb7b73044a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_functionapp_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06b0906d76d55b0eafb3b427a0b945cc869b010798f262dc16f7f91442da2baa", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_trafficmanagerendpoint_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "487daa4470eab01a47f548aee589e40d31891767490a49a9c8b3b377beb96c06", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabcustomimage.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df3b7beddbd323f387134c8e22c190f9fd3fa452ac3739460e9c6808bac6cd37", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_registrationdefinition.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6497ac35a0079109f824bcaea8fa11681c9426efe8c9846007de2578c1230f75", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_availabilityset.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77f4a1006580fe15c6dcaaad9d45e1dafc0fb2c93832069fbdca274cc061bea9", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_vmssnetworkinterface_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a102b9949b97a9e7b8a2f1821dd96a9a3cd3bb3244e1fb41b60777b71aa4cdef", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_aksversion_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b45e717994e236e77ab4599a204eb61d193c7d176b67a34d3620e68aee5edb3e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cdnprofile_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3c9e4f83f134bc08327d3d3ab37cb5f8e09fa5fe48b309aee5bff655158bd13", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adapplication.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a01ea12d86aeee8ef91118aa965de56ca0d09e2290c56041380d8c27f561f0f7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_cdnendpoint.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c1e82cf33936851f15e067862abb2b92994b4343ac3827f70c5a82b08808dbd", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_datalakestore.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b4df0d580c0761dd3ade91498ed2f2e4529836b4c35b2483e41dde8d0abc262", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_adgroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32c16ac3ebd1e2b3024525c9e261e62d30456ed332797c8b0acf3828a02433fe", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_registrationdefinition_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ece5750e5eeccb32bc4c4eccc28d00cbf14f6d2d9216377ad026ae16d01032f8", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlelasticpool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a99a9a0336fd66f99a0d20307eaef6551556cfa50e3b24000e88c37fae5c756b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_keyvaultsecret_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "722ecef7de830faf2bc757011605ad112a4185452706d99a741e88e5a2612285", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_keyvault_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f404612876ef89c63e9163e38c3c2dc922a849ab3aac3cac42cc4a9b0b1c5cc3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_notificationhub_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be0f91c3a0930f859ffdc567b6cf28746c41791ff2105790c96419aaba14e79f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_appgateway_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34b398f5ce06e7c0071035aa6d6e1d710240699738bcb3341ca418782e2b0465", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachineextension.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09bd02e50c12aa9e602e8ac7b4312b2619be27b656114309625c9aa39fbcb97a", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachineimage_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67c57e01af0dff1896e7a22a334388aca376193f0faf86168bd67c53dc635bc7", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabschedule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fe044b4199eae109e14370d764e8f52ef37d4244290a66d71409214c1efc5e4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_registrationassignment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a20f6c329c8d6c8cf53f32d506523f14e64fb05f855d959a5e787bdbe3042f74", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_roleassignment_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"78faee09138728e5731ddc316b5ee3bcd05289f37b2161331c0fcc76034c95a4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_account_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2430e9c6d0cb9fe31a461226dfad1afe0f102a83e702ad877b65935f865a8ef", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_route.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04a75e4a0fe7281c28779eb29c43a3353ffc3dc30aef98926cd651630086cd28", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_applicationsecuritygroup_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d929c69fe1691d7658fcac7d1a2ac92115ce062efc6c866cf9dba8df2900064", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachine.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e50bdff05dbc06a630998c7f22245bd614a2cf5af31568464bd0b9ad1407a21", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatednszone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f9cd0311646895890ed0b353924c64b44699e70ca7bd2c1f330cc58d10ebe18", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_postgresqlserver_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df4291e5a31ae032f6e03646266d5220e921718fd59b4ce2dd38f17703681352", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbserver_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bfe8de591caf07cee5750f15786ce93743d726d8ae3f9951a3567a3d8bb946f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerinstance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0bcc904bda5566b15648f6f311b3300bda6e8df43b46d8a298f7c233a7f11eb", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_privatelinkservice_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"02a72af00f3442e3cb5a19ba01bffc4cb2d47f646abf1a5ae0a125e80a8313ba", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_trafficmanagerprofile_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "857597ed8719996965b0796e71c8223f05b789061f3caafc9a808b271858a2f6", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_roledefinition.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ea734515f2f242298f189539766283d2de71d063864ec0c95efe0cb1739b9d4", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_keyvaultkey_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd68e08a8db49b8e7516600ae85b3616edfb578a163eb7f3e7a9c136aa93148f", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerregistryreplication_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3d2ad0a238f9493c8e20fe7806d07389e0f29e4167d4328820082b15bbdee5c", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_containerinstance.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02edee35985e15cb2583e3b5a9db8bcfb0f2cd1d5bff352961a6a49f38145ab9", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualhub_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebb16f82d8d56435ae9497852954c9f1b68cc7b7125a10fb29dbb8369f76a019", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mysqldatabase.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8aff767afda022f2888a16c43ce6e5c3532414a1cc8bf5d41ec124b3d8ca16f3", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_eventhub.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37931bcc8ecf8b6774e140e52951b74cf86793a42b1b38811fe8dedbfcb907f2", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlab_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"01f4aabc9e6a62314de3b6568a54d0b9ef1c0e98b651205d852609e7ad3f8f6b", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_sqlfirewallrule_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9351536c53ce290bea263dd9dc10ce02965d534f7c4edeed173aeb4c32138476", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_mariadbconfiguration_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "efc537a4c1ed138ba89879455f559f9f7f5bf9badf5c9ab8528c702bd8d18b78", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_securitygroup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1526773fbfb42677ca222a4ff7eff989898662619e5239388e3a0b6392f6fa6d", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_manageddisk.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "13eb09ecf5eab164328b11f0a181233f7e07ab6d972f6c68b0cecc1070da9103", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_snapshot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c1750e5df466813edb9b08b91d67089f56ed2a435c7b6396446de07b5aad243", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabartifactsource.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "416803d8fce98f31c25685f9233c29fe952b624a64b36b94a29ea0d282159d6e", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_virtualmachinescalesetinstance.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2da99cb8865d4373b1b717cedf04da34ddc4dfd9002bdcec80554572de020696", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_batchaccount.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3442b86f15be3bef48e0e093a044c4018296dba33327befce549c3ed1d3eedff", + "format": 1 + }, + { + "name": "plugins/modules/azure_rm_iothub.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1053c8fad183072c97ced402c7e8ea6df316793f31437256c6b75cd2c00072c7", 
+ "format": 1 + }, + { + "name": "plugins/modules/azure_rm_devtestlabpolicy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3089c890ddf7d990b1bed1d12d65f2e8ae04ef114092711f9ecdeb1a95c7c7d0", + "format": 1 + }, + { + "name": "plugins/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/inventory/azure_rm.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "48dffa10112662e43b9d4aa255c10b6e43f9fa5db23317bf5c7fd8a631db29d9", + "format": 1 + }, + { + "name": "pr-pipelines.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b97ed29cc9a054b1f40eca169d64956405773f082892dd8a9cde955628abee2", + "format": 1 + }, + { + "name": "sanity-requirements-azure.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1e369249fb90b3fa4f3cb5aa912b0fbb6824b1d2267d11253df6bc65beb1b8a", + "format": 1 + }, + { + "name": "azure-pipelines.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66d836d2a68eba8cc2df59ba88a2187e11b739cb72a86b45e944ea7010bd8f78", + "format": 1 + }, + { + "name": "release-pipelines.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d7eb4d7e849e71924f0d55f676fc3780746900eafd3c0d48326f06b95c5b872", + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "290a3dfba9b53aecbf86badacfdcb7f320c9a924e02a8f0b4824785a551d0a39", + "format": 1 + }, + { + "name": "CHANGELOG.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "618cbde74ce9bfa81af5465c1f6ac5f488976247815529b1c2f39a9b53f3cb2d", + "format": 1 + }, + { + "name": "CredScanSuppressions.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98c0ee6a0b7a115787cf109a8c0e2ea4a61df6751ecda37915b22ffb44a1128d", + "format": 1 + }, + { + "name": "ansible.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"2caa654e85c7e66be39d494d173f69f7565059c504b7201f2ee033173ab80df0", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b1ba204bb69a0ade2bfcf65ef294a920f6bb361b317dba43c7ef29d96332b9b", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/ado", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/ado/ado.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "147f8cf89d2b91ff642805964b49cb149b5f0f5ec5c2dc47b1efb567f6b54a73", + "format": 1 + }, + { + "name": "tests/utils/shippable", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/utils/shippable/sanity.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1cd365c5bad1facb7e98cad97a20e34231f49fca169653ccd7d34bd955bd88f6", + "format": 1 + }, + { + "name": "tests/utils/shippable/check_matrix.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4a38ad2db66b272075c37e9cca352fc7d5b69fe19e32adec0cdd74f91586fe6", + "format": 1 + }, + { + "name": "tests/utils/shippable/shippable.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "829380ef8b40f42a82696ada1168318856596f4e943bbb9a4f5cd6130bbce2af", + "format": 1 + }, + { + "name": "tests/utils/shippable/cloud.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99db6946e47cf9e49ab2fccbe0aca8ffc9aaa0918fdc9e3ef543601c55a98713", + "format": 1 + }, + { + "name": "tests/utils/shippable/timing.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3f3cc03a997cdba719b0542fe668fc612451841cbe840ab36865f30aa54a1bd", + "format": 1 + }, + { + "name": "tests/utils/shippable/timing.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "e4906097dcc02406930229bf0a026f667327fbe11ea6b9763bc76b4c3e98be29", + "format": 1 + }, + { + "name": "tests/utils/shippable/azure.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99db6946e47cf9e49ab2fccbe0aca8ffc9aaa0918fdc9e3ef543601c55a98713", + "format": 1 + }, + { + "name": "tests/config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e67c280ecbe32951edc9c5aacb7b514cc2a5bc70118cc43e79c52ca2b606138", + "format": 1 + }, + { + "name": "tests/sanity", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.13.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bebd2850c6d8fec526b000b7e82e9433a001060f60807379ed86e7ff9c1ac50", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.12.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8daa4d81f82078d505ab1c2a7bfe264bc39bf8cbf5c2b3bfca5d7179f351b148", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.11.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8daa4d81f82078d505ab1c2a7bfe264bc39bf8cbf5c2b3bfca5d7179f351b148", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.9.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffbea11daa9fd2fa17bf7aa84a16f321c9ff581b7a69e5f3741b3019f7cd2f8e", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.14.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da02cd547c88fa6e356d6867354b1b3db7a0f04d13fb562afaf5be1058957227", + "format": 1 + }, + { + "name": "tests/sanity/ignore-2.10.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8daa4d81f82078d505ab1c2a7bfe264bc39bf8cbf5c2b3bfca5d7179f351b148", + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adgroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adgroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e90b0bd13c40122b87176faa412a83730dcc85c0060dffa0d7b0450d559ed40", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adgroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adgroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "164f83628dc35c78e707c2e1e2263108763f36f14b8dd6dd582d3b80668bb533", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adgroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adgroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_notificationhub", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_notificationhub/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1be076424fbf717d678e202250426e9b368688f8d3029f0c5dc262b97df819af", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_notificationhub/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_notificationhub/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9624fdf71acaf83f57f4e4756520640e656e90d281c05a9ed732eb7d5839e8a", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_notificationhub/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_notificationhub/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a66dc9e034ad04e167bd52df4f75f2bea4499b25349fc42b3c8de9e2fe806996", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datafactory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datafactory/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a74040b53394bd68b1885c350f1f62603740e721ca38e01609660895693a3858", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datafactory/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datafactory/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf84ad6d9aa2237a467d38db0d56b0491c7d164a3929d2ad58a80ca634ed8611", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datafactory/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datafactory/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resourcegroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resourcegroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd30933c00554775a36ee6421935b7d8baf776db7cb35d5a103be9e18b7d2486", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resourcegroup/tasks", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resourcegroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1e3d0679a5f91a33aba0faa65678c972d07f6cca59ad6ffa065bf31e6f5e59e", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resourcegroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resourcegroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backuppolicy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backuppolicy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backuppolicy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backuppolicy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75cbd5e96f3bf83cf30b8d6b09c698df02a860f480ae11270f4703d54cce0728", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backuppolicy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backuppolicy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitordiagnosticsetting", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + 
{ + "name": "tests/integration/targets/azure_rm_monitordiagnosticsetting/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc618d5714ddaae8f1482cf89dcf3327cf82fa378c16f6b5cf092339927261c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitordiagnosticsetting/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitordiagnosticsetting/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72fe8b33496a877069bc27654c879e619ca34b55982bbd4f92740a8c1b307260", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitordiagnosticsetting/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitordiagnosticsetting/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_networkinterface", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_networkinterface/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79cafc6b10612f907e2e4a97bac50c9948ac0a0dd5030875a9465204a6323b48", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_networkinterface/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_networkinterface/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55b06254bf273e7af12bdbe3bbba3e0fcb1fbad6bdf12996096321c458a881b2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_networkinterface/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { 
+ "name": "tests/integration/targets/azure_rm_networkinterface/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_securitygroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_securitygroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a390a5b729f05cbb0f6d91d7d663389c81a571a34bd2e931dc56fcdaee025754", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_securitygroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_securitygroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7d9797319b6a4e93b75674013e526aedd93f352b682319ec2fe65a05c8889db", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_securitygroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_securitygroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fdd66037c00d326e8724db280f591414ae437954cdedeeebb4b455784fcb4d3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92002597ad6d4b7ff0e7f835194fe628ea09447982b20d05b93cd51ee0966b0a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/files/test-protected-settings.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "436711afe43e0c31f515a016fa56e64c70f1df35502725327b8c703773d4823b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineextension/files/test-public-settings.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1063a36dc41ca530af7839c69ec22f2b92447be4c3b41e90429c970b23d9116b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_azurefirewall", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_azurefirewall/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c00350cca0b02532fa9ae31b5fdf0d5b963615b521197b71ac4cc024aac9d037", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_azurefirewall/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_azurefirewall/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "5b16a841fdb7190e7c70c2e94025735563442af45795ebb81cb4b028d9b317e9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_azurefirewall/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_azurefirewall/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roleassignment", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roleassignment/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51bfa0d69b7c8b7e94b28aa9c60c921ace3971f50a385673a66791c31749678a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roleassignment/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roleassignment/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f00c67e5c33a10bb37d546c8d96868716a354b63f69b7ebdd6601f2cb55cab8", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roleassignment/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roleassignment/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roledefinition", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roledefinition/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"cc618d5714ddaae8f1482cf89dcf3327cf82fa378c16f6b5cf092339927261c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roledefinition/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roledefinition/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04393583f4695e833ec9c02daf81d7889cb14aacafa612bf41628ba775eb66a3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roledefinition/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_roledefinition/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cognitivesearch", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cognitivesearch/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cognitivesearch/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "baa00bf3c1cd7f7a8ea685b173471ccdd925d75a520ee4d6ec8d96ea2c9d9239", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cognitivesearch/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cognitivesearch/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "deefc8fe8e3699aa1e8852f316f52c0037219050b5b5d07919fa24d06ac14ed5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_nsg.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21191e83b2ba1806e8f21eb88693cec6196c42f57bfe21732caa7743fe7ada1d", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "220ce9a496dd09871db893cb671019cd529172f65ad99ebbc276c29d9a636b2f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_specific.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bd990c699b676efd3c10984295efd6ecd4196d6731e0abb3e83e178be97881f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "077fa2cdbbcdc6ebf667aeb177e25e4dd6228852a90ad47b64f3b561b0378449", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_ephemeral_os.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "47c62e68edbce76c046c9ee33c983775a500ed1bdf1a107398059552a3680942", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/setup.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "140b0bd768325fbe3148cfe81dd4a4002e07009fa651895c8647c36dc643838c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5176d99917035c41c250d68271e54d236daa6c66e3dbc6e9608033144d822d07", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24eff79c0c7c5396967e5240f42f13f69dd00177d12bd52889beb323e738445a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_spot.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64879be6318be1cc70ca9af952868a4079485761ed64ec1266b1894fe876a632", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal_manageddisk.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de159eb438242efc5c51641147de79d61512f8617810472588830fc1f77abe5c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_latest.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "726f4a5feb22393286eb1d2b8ae72610b68dfe6e979473bd03bbee9c5eb55da9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa2de5de07ae296548865061238e55d4b15db2b6f9b4dcba1bf4dd9b4a36dfb9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64b8519ca7a160ae3ce6ccb1cca73a6c1038d7374ff6b4060aa6024b9cb0744c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "d765575a20e2788091b6cbade725e09e295a46aa42b6c9e7168daf3f507891c9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "98a1bdc5892fa44ebbe6cd64550b850bf8fc61298de11c4d3bfd2f3d8621cd12", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachine/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d9a70f8c75d9a856cd529ae8ccb6e589c0db9494da4bb08cead6963e98d9dd5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mariadbserver", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mariadbserver/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f1123b3696957f55774d4316a4461182998975a0db1f960ef289b20b5e926c7", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mariadbserver/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mariadbserver/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf385689a7c4b6dbded1c113fe095815014e8a833c8f1f59aa2284df8caeee22", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mariadbserver/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mariadbserver/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_devtestlab", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_devtestlab/aliases", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "435162ac2df7aa373bc2d191a07d79c283d3891a1278b0277f85e6112b96fc03", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_devtestlab/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_devtestlab/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7379179cad070c29d6055139c631d98000d6ea15dd271aeba5c997677de00985", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_devtestlab/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_devtestlab/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea124e6a615561c87b4dd84d829b1eb1d2bd1658d1caf6211019eeda3ffc8136", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_async.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "734d43b960ffc7db235dde46ac78131614571043f2fccf7194fda91ef4158e80", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be8b2a1ea6a137dff449749ad05389cc76d9507a31b1936354e4b6a5643fe871", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_shared.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "436fa42bc89d6926a91254d61ebc57a87df84cbe54519f546b9fccd86b0f38b8", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/templates/disk_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d52e27205862487e08375c08ac7c8aa27c059db63c3fb83ebb8dd8eb0e7e11f6", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_multiplemanageddisks/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e74506a8cfbf91887befefae0b179455f0071b5d904b30c00b67897373123b23", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_account_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_account_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a58bdac3b7068e244f2cf71f0a1a0bf136bc8785d2a8947fecbafdd97313adb7", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_account_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "tests/integration/targets/azure_rm_account_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11c0ebfdf8010257f9bfcbb31b1210ea509fe8f49d14436557fec6778617bb35", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_account_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_account_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webapp", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webapp/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75b6fa7694979e088e4b068016381f37a603045a395e3b8c660032f4008257d5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webapp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webapp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "58917acf9278716057d2176e0b31da97cd7755ef63b71187bdea74c5fe093ada", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webapp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webapp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mysqlserver", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mysqlserver/aliases", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "2c74420c4bc9cc5d8af14406a1d97ea762c6b8afa4de8728a4a833b2e742cf41", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mysqlserver/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mysqlserver/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2803b32a2a95737e3838dfb137e4909b1a1268568c268920297cdbcc95773cd2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mysqlserver/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_mysqlserver/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aks/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "783d6dbd1b9cc4b0543d6e96d15206283018e0c291764a2a4106e860d41cb210", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aks/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25a42e35f5f43fb98379902f469b6da0cd3c2f49003815d54684bce5a4433745", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aks/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a870c4f3517f812f6ec72649e4fa0e030cd2e3f3d96a11e74e75afe64142b84", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aks/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_aks/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_openshiftmanagedcluster", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_openshiftmanagedcluster/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20ed61d096d02ba8d900ce019b1762e22889c502a78e1d8716fe4a78ed8c5315", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_openshiftmanagedcluster/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_openshiftmanagedcluster/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "601cccaad8ac15018cda761e360e04a19e31cffb2232c7705b605899f5aca414", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_openshiftmanagedcluster/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_openshiftmanagedcluster/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_servicebus", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_servicebus/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "923b442d786cff06e0b7a3ea4232f7bfe10835e93fb73b8df6ee997380099420", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_servicebus/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_servicebus/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b3ab66c8990eb64d0b1a23ba04db9b5a9cd34306079309d30b89bf2f1cbf6f2b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_servicebus/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_servicebus/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_workspace", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_workspace/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08cf46c06f61ba50ecd182f45dfcf2ad7529f1df439d0ca713a8ceba58e14c99", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_workspace/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_workspace/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f2f2f5b5e5aed797d31f27f3ffe873d3885d1c84f3303e2f114091620e74110e", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_workspace/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_workspace/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ipgroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ipgroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ipgroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ipgroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e540f16042dc4b7421f84f0b409a87c99bac21cabcedabf9a45c7dae3e93d38d", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ipgroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ipgroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2edc512654e17a39d5fd83d0fd5a173f243bea75995ec29b7647fc0143dc4e19", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + 
}, + { + "name": "tests/integration/targets/azure_rm_storageblob/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageblob/files/Ratings.png", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20cdf86ae984fb64eefdc57a0e03f34c83cb8b27834b33c84768cc2b62cf9d68", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpointdnszonegroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpointdnszonegroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11c35de489a3da87eb40a2ce00a80a9992fc89c3b3fdee710301ba9faacd332d", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpointdnszonegroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpointdnszonegroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d496f16d0aa619a4e8961628d51c436b747c062e6dd7456d6ab19782ca1680d0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpointdnszonegroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpointdnszonegroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappaccessrestriction", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappaccessrestriction/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappaccessrestriction/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e186aa6d7f0968de08490f3eb4a764f6723d2c5c686cc48ca2aaf8c3a582e45", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappaccessrestriction/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappaccessrestriction/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinesize_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinesize_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinesize_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinesize_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f72cf8c42cd894dbfb720af369e9a548258c813fb6021ebbc42ab7daf7e17521", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinesize_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinesize_info/meta/main.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "86ade1ce7ec530fef84440672efaf3b60c253946bb82f5f0a134a9691bc6ffad", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subnet", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subnet/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subnet/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subnet/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6e7ac2b9d8dcdb484f8f6a6fd6d594aea99e0955f517714fd0a94abb4a2abba", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subnet/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subnet/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationaccount", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationaccount/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87c7ae7077faea9533bb827344f49dadfecfdbc5ea179058d72f7bad3ae81388", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationaccount/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationaccount/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1b024b67a5cacec07205761b9497728f87eea706335ae8807cbebe0d046a36e1", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationaccount/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationaccount/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_iothub", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_iothub/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_iothub/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_iothub/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a4b7fbea53b2ac97764fa79a83a290cba2534d195f8c7b1e8e2ec8fcb233f69", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_iothub/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_iothub/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subscription", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subscription/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_subscription/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subscription/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a7ba246fec2015e9ec84aad7e917f0b752d09d113666ce2ba9c1530671a9a22", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subscription/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_subscription/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_availabilityset", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_availabilityset/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5647901b6198849a4dd58b7fb51aee85b889220f80acde88cea67bffff453d8d", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_availabilityset/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_availabilityset/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c30b16aa5ebe6fdabe4fd9b9cdf0b6dbc4c2b446c8b0e99a19dea16ecc99d3f4", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_availabilityset/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_availabilityset/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_functionapp", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_functionapp/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_functionapp/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_functionapp/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08e8580923076560eb7f8dd2ab853c99b331bf840655f676a0185cb0e5550786", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_functionapp/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_functionapp/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_image", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_image/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fca45b39c1e6a21b8fb8e1b6c8f5954d8accf0bc6fa436361d75b014b36a8fd6", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_image/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_image/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2dd2291b8998e049919a4f19eb92b7b0165845c98230249250eb8cf71edbeb54", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_image/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_image/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_managementgroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_managementgroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7fc1f0b30d75ba7b3a8f5a9a45a7703c06585edc0f3ce9231a9b2ca7a649864", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_managementgroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_managementgroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9bbc4f4b4ca0601a10dca24bc664363538736a8c80f70553e1f4f80d1a66c56b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_managementgroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_managementgroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_trafficmanagerprofile", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_trafficmanagerprofile/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b34b028873420b7fdba3b5e4f0b3a1283d3879fcf1ee2d91dbe8a732651ca04a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_trafficmanagerprofile/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a654c2c02f89f415a85b255516d76fd5aec7096a256c20f1d2daa4b39209bb3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_trafficmanagerprofile/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_bastionhost", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_bastionhost/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d57abe6f8fb0cbfb8ac22431c09126cd48bb4d2836a6d151faf92fb6fbecc6aa", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_bastionhost/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_bastionhost/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef98e39895f3892d2e977cc4a79ca2aea6c6bd61fb6dbc8a31774a765d9940ed", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_bastionhost/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_bastionhost/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_firewallpolicy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_firewallpolicy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_firewallpolicy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_firewallpolicy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b06b43e022f24167d8d145bbf99d8289974eeb358f61f554ba58df26a36eb1c1", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_firewallpolicy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_firewallpolicy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appserviceplan", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appserviceplan/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d8b505b037b869206fe23e768d9d42e48dc8db7c83ac3023ec1be689261f907", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appserviceplan/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appserviceplan/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8bed0c09ce07686ed97137363a429a5449b364fecc25457c881d26e61e97634", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appserviceplan/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_appserviceplan/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkgateway", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkgateway/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f59a6f1fc64a79191d0110c2f203adee747fd1bb69ad631a279efa7d370eaec", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkgateway/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e0006451e2b966266a67a1aebf9f2d648e016828447550f0f083a76ace66dfe5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkgateway/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_publicipaddress", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_publicipaddress/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed3159c0b56e6e42548ed58456e5304d48aea0b4e3a0999591f863ae79b225ba", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_publicipaddress/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_publicipaddress/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c35780d3e24b7197e278387511cefb3fcacd33779a474978c79d556c9f5d214", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_publicipaddress/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_publicipaddress/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhub", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhub/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhub/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhub/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a7c4f53ae8ca7dd799b96fc7a093376c5bd4180045c990eedbaadcd4314d30e", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhub/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhub/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitorlogprofile", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitorlogprofile/aliases", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "cc618d5714ddaae8f1482cf89dcf3327cf82fa378c16f6b5cf092339927261c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitorlogprofile/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcb0cc259330b2575654edbc4c0451ee9a329e28fec78eab3c1d68bf9df418ca", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitorlogprofile/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_monitorlogprofile/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_recoveryservicesvault", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_recoveryservicesvault/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "923b442d786cff06e0b7a3ea4232f7bfe10835e93fb73b8df6ee997380099420", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_recoveryservicesvault/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_recoveryservicesvault/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be03a728e66a55b15c137fce7592331229fbffd37fccc0a54db98a58693c7143", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_recoveryservicesvault/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_recoveryservicesvault/meta/main.yml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_expressroute", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_expressroute/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_expressroute/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_expressroute/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbcef6a2061617b1ff6d3d158d0c78a55ac7e993e4e7ab0d584817ead1a1d043", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_expressroute/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_expressroute/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagementservice", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagementservice/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "528893af1cac01f38ab277aec0138b83c74b2576464d96eb7f3da330784edaff", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagementservice/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagementservice/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"00beef6f868cdbf51ff2e042703c071e86bffbe6c3cc1beb22206bf3c14295a5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagementservice/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagementservice/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86ade1ce7ec530fef84440672efaf3b60c253946bb82f5f0a134a9691bc6ffad", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_gallery", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_gallery/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e676d2331077a7a453c2694fef48b0b56195d00e5a1009e3f668a7e174abd16b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_gallery/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_gallery/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf595e8d122e1ea83685f48709c3527c46c0b27ed42da07240dcf5292ca92671", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_gallery/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_gallery/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappvnetconnection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappvnetconnection/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, 
+ { + "name": "tests/integration/targets/azure_rm_webappvnetconnection/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappvnetconnection/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "790f3c15bd2fa9ba2013a707b5ffb5996c1562bff7b9886e7c85c05fd327ab7f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappvnetconnection/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_webappvnetconnection/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednsrecordset", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednsrecordset/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36614727374728e26b5686e9fac66641e5b9c268b6945f2dcd1b4a9c4d2232c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednsrecordset/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednsrecordset/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8c0c21655e769226d1dabcfc309afff1d8c8a90780ec48cd45bc5f5eb53e7c2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednsrecordset/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednsrecordset/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + 
{ + "name": "tests/integration/targets/azure_rm_aduser", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aduser/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1658975f0508387cde4cda499d7eb9524e31bc2f02cc49d6483d7e210d991234", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aduser/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aduser/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "540275f0a3abdbfd214f0379161cdaf147a166dd4678ce769b87796feaaed4a5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aduser/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aduser/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cosmosdbaccount", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cosmosdbaccount/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9b370414fbcd599ba1e1fa7adb52887e0df75d2ffce2a609bd5af743b02f600", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cosmosdbaccount/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "089a86d47541fecaa694afb80c727708a8b7ebb7d5d6e8dd403b8fecd24668a2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cosmosdbaccount/meta", + "ftype": "dir", + "chksum_type": null, 
+ "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_routetable", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_routetable/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_routetable/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_routetable/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e64f574f81b0cbeef18f1be2be5d0a56ffcf838a288617d9ea8862983beaef6", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_routetable/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_routetable/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loganalyticsworkspace", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loganalyticsworkspace/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6ab21fb1deaf7061dd270b8ca0bf7c6e585eac172153559349e997150abbcaf", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loganalyticsworkspace/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fc2d8a21d03401472a808094ab1cb8ce6c18d5a22cbc916a6bd672b0349271f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loganalyticsworkspace/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_manageddisk", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_manageddisk/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b79d487ac5f66f90cfba1e03eb40daf31086e77447861b1cf4aeceee77d9745b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_manageddisk/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_manageddisk/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f94f8141efef361c15c59980b3387edee7d4ddc5555fd7e0adab1484df9b8191", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_manageddisk/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_manageddisk/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_diskencryptionset/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14e4d1ad9f066c25e177ef5e8e34a1388615fcef45ed5e62f061972f7ec3d5dc", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset/lookup_plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_diskencryptionset/lookup_plugins/azure_service_principal_attribute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d08fa7846e9ec68c2a26501edf91ed777a7bfa60f219470df82cfb66fd7dd6e1", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatelinkservice", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatelinkservice/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatelinkservice/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + 
}, + { + "name": "tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a170e5d29f98d1744ce53291462d95aa0687cacd82ba80c2df89f6dc7c095a0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatelinkservice/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatelinkservice/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistry", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistry/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec1ff0ecb096def7880d4a1aab74cae1ee3e46dbdba9df7f476b7311ee64f2ab", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistry/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistry/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "268310390c136d6f4ef1873f15f59d85e23c7fa9971d10b9a1716069138a18cf", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistry/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistry/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adserviceprincipal", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_adserviceprincipal/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "528893af1cac01f38ab277aec0138b83c74b2576464d96eb7f3da330784edaff", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adserviceprincipal/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adserviceprincipal/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc547f3e3d85cbbddf317023aa640e9ba44003ec4f6282f8f6226ac24a53c1a4", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adserviceprincipal/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adserviceprincipal/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlserver", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlserver/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b497f92848d198221058a5f5aa42e37dc513f9d2a64f54b2ddc9029b80ab7219", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlserver/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlserver/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4be907accf51f165748454fc1a5159c59b6997a28b8e1b2c4c22c249ec777f4f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlserver/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlserver/meta/main.yml", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datalakestore", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datalakestore/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datalakestore/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datalakestore/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91c36efb148a6e7e8816ff3f599ae2641ac57aaeb92d7d62a2736b7c07b6d5fc", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datalakestore/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_datalakestore/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21442459796e3c03ab3c6461c8251810f5545ff2320c4c814cce8b8c520e615c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1918adad547000201f74726f4592154582274ef4debc12523d9458c7e7ed6571", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/files/cert1.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7637ce7f36cca366294688f612f6456714088cca19583ae7e9475e572d85f4f5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/files/cert3b64.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1375dc6f493fb760c6087c4cdd62c4ea898887070ad17dc84196b3d6ed0545ec", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_appgateway/files/cert2.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4dc34faa2e8e94cd385be73c33a849f452dd57d061a8aec118e6d391e46bbc0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnszone", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnszone/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1af368a225674fcae08ea809c4dc605f03bc0334b44a9b17dfab0dfe49d93ac6", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnszone/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnszone/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"3ee8d72a90072536d2cd063f43e5d907d629eaad1aad031c68547328bb25e934", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnszone/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnszone/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinescaleset", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinescaleset/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5510a946bf7e1d1ad196eaa73474176b122ae491702a99f9160ad1d554fc08bf", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinescaleset/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8a554bc94ee2b932713ecb72d3e19b09d5a61764b2b4b4d25b7a333b932aca2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinescaleset/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetwork", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetwork/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetwork/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetwork/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a1898e5f4ad9974807f3688113c3c8ba5620c62767a773fc61e53fcc783a1e0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetwork/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetwork/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnsrecordset", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnsrecordset/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36614727374728e26b5686e9fac66641e5b9c268b6945f2dcd1b4a9c4d2232c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnsrecordset/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnsrecordset/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b9f536a2062adc87ce808e6090b3fbdca0a6cf5738c2240bd4e42631ad4b6d3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnsrecordset/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_dnsrecordset/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", 
+ "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_deployment", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_deployment/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11d8df93f7c7fb0bd35df23550ce55cea64300cbd7b3a1282927d49c53f53951", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_deployment/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_deployment/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40e1e28e9d4f20a6e94eea183148fd635ab49fe7fb2d22e3690ad2dacbc0dc5c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_deployment/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_deployment/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adpassword", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adpassword/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e90b0bd13c40122b87176faa412a83730dcc85c0060dffa0d7b0450d559ed40", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adpassword/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adpassword/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03dd808fe4a8bac380c00d4d955647b943104ac15c4d48f8d56220694d8fc360", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adpassword/meta", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adpassword/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "726101a2dd7238a8ff7f94fec489e5fcfd8bb0db10ecd074fa0686b2eb9d6d24", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a5a277a2c0ecfe553d34e9f9dbca06d962f8de2e2aae5c9b54be0d9f037afe9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/lookup_plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d08fa7846e9ec68c2a26501edf91ed777a7bfa60f219470df82cfb66fd7dd6e1", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszonelink", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + 
}, + { + "name": "tests/integration/targets/azure_rm_privatednszonelink/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszonelink/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszonelink/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93d612e8b237141324401fcecd2c981476861705baf09030841246f5a951452e", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszonelink/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszonelink/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_postgresqlserver", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_postgresqlserver/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "212c2162821130244834f49afd3460bb037d426eac902fc88de51afbe6bab522", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_postgresqlserver/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_postgresqlserver/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71f1647be9d4dee9a954cc2d07f05b7f7dcbb8bee5d04969b78a55c83972d4e9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_postgresqlserver/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_postgresqlserver/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_natgateway", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_natgateway/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21442459796e3c03ab3c6461c8251810f5545ff2320c4c814cce8b8c520e615c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_natgateway/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_natgateway/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb517955919b3994264ff27d39b427b342ce4e414807e327fcbc604de98de662", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_natgateway/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_natgateway/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineimage_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineimage_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8eb6da86566844e8278692f99177c27f5e17dfc808484e9168d842ce4ea822b2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineimage_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f31e17c306b2a84fa24b67a1ff63e35d3156b1e7d2e313199331a7108e87deeb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineimage_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagement", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagement/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "592e63285615e1fe4768ff5027ae6a96e4dea5c875e1127e14c393cfacbc44fb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagement/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagement/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "43ae7f214db19a05f6fba586c9afef61c4c6345d7c75fd00b617293208cf693a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagement/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_apimanagement/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationdefinition", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_registrationdefinition/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationdefinition/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationdefinition/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a583f5bc8add4b7e43a0d426bd822600192802c9bc15f8f3f48bbeecac5485f7", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationdefinition/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationdefinition/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhubconnection", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhubconnection/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhubconnection/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhubconnection/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6e38f9c74c7efd203ae7c1760075b8452b0f36fd1348ab216351727444885a91", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualhubconnection/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "tests/integration/targets/azure_rm_virtualhubconnection/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_autoscale", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_autoscale/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2019d6f29a1dbe1242200ff0a0485722bc57431db8265a741c5e501cd5a920dd", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_autoscale/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_autoscale/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f59879bb7f425656a8a514579d39e3f5cb83b8249b050fc7d44583b7037869a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_autoscale/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_autoscale/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hostgroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hostgroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hostgroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hostgroup/tasks/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "8147fe03a7a144f1365298a436b37c14c92c2ad95a8debe63052b63017ee6774", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hostgroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hostgroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkpeering", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkpeering/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee3b904aa8ab8d0a7e47983927157b420c195fe1d8606de015158339e7824a43", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkpeering/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38a6724132aab6bfc1da0eb2706ccbad284a4f40e952cff9a43817cbe499eba0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkpeering/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_eventhub", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_eventhub/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1be076424fbf717d678e202250426e9b368688f8d3029f0c5dc262b97df819af", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_eventhub/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_eventhub/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d770a67dd20f3735183550907fc2cb0bbb4a3b39032b36c0e03582dff92f742", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_eventhub/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_eventhub/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resource", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resource/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b9685232ad3eab40ee59426a3dd04a8f71719dbfa1d6becb498bb416c8d1ff1c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resource/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resource/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a95da8a8656a35415a3b437feed732f73ed55cebd52ec82b5fb1be4d10584f73", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resource/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_resource/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_vmbackuppolicy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vmbackuppolicy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "923b442d786cff06e0b7a3ea4232f7bfe10835e93fb73b8df6ee997380099420", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vmbackuppolicy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vmbackuppolicy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cccf65de4bf107159176c3acc0a67856a4bd0022ee22320a72e973fa2d954004", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vmbackuppolicy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vmbackuppolicy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_proximityplacementgroup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_proximityplacementgroup/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_proximityplacementgroup/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_proximityplacementgroup/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b82875cadb004d9a7c2de8130132298d55d142bf1a894bb9d60b826c1f248639", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_proximityplacementgroup/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_proximityplacementgroup/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlmanagedinstance", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlmanagedinstance/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d19280a8d5d5c2523536ea5aed2c5a14d1b19cc49f3f21974ca3e9a9921bb7cd", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlmanagedinstance/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlmanagedinstance/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d650dc72294a2dd10db5dfb8c90200c76e61b391db90daa2a6c0a5dc36d9d5ba", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlmanagedinstance/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_sqlmanagedinstance/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hdinsightcluster", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hdinsightcluster/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dffdf5ca6061cf1a43744d6b7e0bb18a4020eed3f693702082b2314f982c65a6", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_hdinsightcluster/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8f3a538060cabfd7081234dc8bdd50c2465dac3a07b5e64b527ec496f2d01b5", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hdinsightcluster/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_hdinsightcluster/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpoint", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpoint/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "11c35de489a3da87eb40a2ce00a80a9992fc89c3b3fdee710301ba9faacd332d", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpoint/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpoint/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9a95e51f0df82ac337e57b026a55ec070fefc15203a3dd6d0239e89a09c31de", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpoint/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privateendpoint/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_rediscache", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_rediscache/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ba0bf8a7bf29813d54b53fd7c3e65698ee5f802eb2ccf76c1707680acc07f17", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_rediscache/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_rediscache/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17e2a11d7e099908c91ef4eb73392f860280b089afe97af692aa449c33f58b07", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_rediscache/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_rediscache/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationassignment", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationassignment/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262ad6ab8a8087f2f21d4f26a2c2d02c347a7edf0fb1af8fdb931ab1c1087bbb", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationassignment/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationassignment/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "feb8a64c4dcd8021030be7a894117093d24ec2abb4332b401db9a3195d20ae50", + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_registrationassignment/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_registrationassignment/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_batchaccount", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_batchaccount/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "049e0b8480c14a44ea8525b83848582417405cd5352a0afc5fc9bc3f1c0f3941", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_batchaccount/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_batchaccount/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "352bccf8fb5c893bf917097828d4fb16cda90c527f007892a9352ed2402aae47", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_batchaccount/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_batchaccount/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36614727374728e26b5686e9fac66641e5b9c268b6945f2dcd1b4a9c4d2232c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/tasks", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f3d7cfad90bfc2a0c18b401ad2b52413ad9b3f3c9b02526bbe8e741a9b62595", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/lookup_plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultkey/lookup_plugins/azure_service_principal_attribute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d08fa7846e9ec68c2a26501edf91ed777a7bfa60f219470df82cfb66fd7dd6e1", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vpnsite", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vpnsite/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51bfa0d69b7c8b7e94b28aa9c60c921ace3971f50a385673a66791c31749678a", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vpnsite/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vpnsite/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "746c59c9882ae5210d22d6835db86037cfde1448e1faf7d75842fe8e18cd287e", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vpnsite/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_vpnsite/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adapplication", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adapplication/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e90b0bd13c40122b87176faa412a83730dcc85c0060dffa0d7b0450d559ed40", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adapplication/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adapplication/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9ee994437da318ecf5a5fef8b899ea236db7d97cca8d027092930f5e181bf4f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adapplication/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_adapplication/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_azure", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_azure/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageshare", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageshare/aliases", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageshare/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageshare/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84ab49dce1912995caef94f302ff7ed98d6234e06fe54d6faa55cd29e802e7df", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageshare/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageshare/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7bb4c8aa02376e55002499e1a01763a649ddf80c253e1da4556ab7719ed05ac", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e28a64d6bb6fa59065265ee662c3da858db8ba9d278007b84aa76b161ce5905c", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/lookup_plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_keyvaultsecret/lookup_plugins/azure_service_principal_attribute.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d08fa7846e9ec68c2a26501edf91ed777a7bfa60f219470df82cfb66fd7dd6e1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/test.azure_rm.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e2e2f39006f1e6b83660c469a9c29e1dfb17b99cca9caa124626e14cd0658a0b", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/templates/basic2.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "758f6dfaa158a3601eb9cd986357f1a40a04071ad62f3ba613002ffde00d6018", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/templates/basic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f99d3d1c0b4f896b1f43477667fc744c2cd2d38471c5bbc9287773b57682628", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks/empty_inventory_config.yml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "fb0ef55005c4532dc1ce027933a64bdd030fc524cf769ba04c7a55af74ad3825", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3b5efa255e58e82d8f9b68d69ea0d18acbf391fc9c8c29429ce239a06191fe5", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks/teardown.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56f5867ac251afdf3029e70f5c6cc56202b02f5e4a2b7d581a778f0bee2f1d78", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks/test_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d203152d9cf31363d91d57bc1c8d3f41819c65b5794c5e499e8b7f6f610da20", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23fff20fb969471888915a24760b7a90723a30e3e0e6b7c7745864470c898027", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/playbooks/create_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c8ad8ccbb06836f7fd7c8eb03902517c05d3d60669884e6dd993cb4a982d743", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_azure/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "203fe16581a305486f7d5a2feafad324ed5515a96c26b4f4d84ab34a1cdcb6a3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cdnprofile", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cdnprofile/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d922663ea733e96e4da9178ebcd4baa17435a011302f3d609f7391e651d119f9", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cdnprofile/tasks", + "ftype": 
"dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cdnprofile/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c25b15cf50e7d835c76e61ee05dcdf8d788ca7bc70bc600ac1c01f9461743fa", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cdnprofile/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_cdnprofile/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerinstance", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerinstance/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "059ec72ab54682d34b109880aa48f5b5b702d120042153371442c7293c011f31", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerinstance/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerinstance/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6ea4f42d32c3d5feb8a3764ae1e8c21b32dc5b7c7a3af6d6c1df622dfe7d1a2f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerinstance/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerinstance/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistrytag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": 
null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistrytag/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistrytag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistrytag/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2dafa7a0578a14b8db994de7895c37b6d7548183e277af00ba2df8056bf9a32", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistrytag/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_containerregistrytag/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ddosprotectionplan", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ddosprotectionplan/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1be076424fbf717d678e202250426e9b368688f8d3029f0c5dc262b97df819af", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ddosprotectionplan/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ddosprotectionplan/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56e8c026b7bc7080ae83e08851e81a5e338154dc1c959c430a47a08a3fdf740d", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ddosprotectionplan/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_ddosprotectionplan/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc52aeb87665e099cf3a8ea346ef11e82ee323aa360b983d540739839a852fd8", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backupazurevm", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backupazurevm/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91c612e6c7b96f80e7e1cede3dc0ef7431a2900356b7e85a7521e2679bfcfa49", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backupazurevm/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backupazurevm/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f36a9206aee456e050ad93d88f5949f5e2926caa03d5001e7f21faa58c286f1", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backupazurevm/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_backupazurevm/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "86ade1ce7ec530fef84440672efaf3b60c253946bb82f5f0a134a9691bc6ffad", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loadbalancer", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loadbalancer/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "923b442d786cff06e0b7a3ea4232f7bfe10835e93fb73b8df6ee997380099420", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loadbalancer/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0fcedad3bc473d420c53ef5a9aef83d360b2b7490d1da7d486ceb97a89298e22", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loadbalancer/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_loadbalancer/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aksagentpool", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aksagentpool/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "783d6dbd1b9cc4b0543d6e96d15206283018e0c291764a2a4106e860d41cb210", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aksagentpool/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0dcd646ff5f0878e03f906d116aeb76539de11af1ba1890a47d88fba0d83b4c3", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aksagentpool/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_aksagentpool/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageaccount", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageaccount/aliases", + "ftype": "file", 
+ "chksum_type": "sha256", + "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageaccount/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageaccount/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a38da8b4c75427135c5af1f070ec2938f3028fae753241377d6fa644ef38ec6b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageaccount/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_storageaccount/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualwan", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualwan/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b09e7a20443d9fe8e820607cf9ae826988c41fe890af049d025cb7eba9e4dbb0", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualwan/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualwan/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "626d27329e2174e36d407be9fc626156ac4ed0817e9ccb6a9f125f7c8fa25ca2", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualwan/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_virtualwan/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszone", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszone/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e131092efb0e9d778baeb38471a15da9b4f8ab9e3425ec4f37aaece309220d47", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszone/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszone/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "864bae11ad5b2e3e708668fd4d8b09b7b8ffa4d27b62ef98d0275a1da830c294", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszone/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_privatednszone/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationrunbook", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationrunbook/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "522c2e1148926193596b31fa7cf086b9f57e17c7028abc638a2b6f5bb9269850", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationrunbook/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationrunbook/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"45759293845fd4f0cde756813fe3887ec228533ff6cc5957d1a776798bc2327b", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationrunbook/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_automationrunbook/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_acs", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_acs/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f7e954a810d0341b1aed1d34e5684bc502980bb6ebbe6a46fb03163ed0e11db", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_acs/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_acs/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c8990f11a0e4bac2c4cd8131f0494e05386a8a17e519e126910e941b5092c32", + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_acs/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/azure_rm_acs/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/LICENSE b/ansible_collections/azure/azcollection/LICENSE new file mode 100644 index 000000000..e72bfddab --- /dev/null +++ b/ansible_collections/azure/azcollection/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/ansible_collections/azure/azcollection/MANIFEST.json b/ansible_collections/azure/azcollection/MANIFEST.json new file mode 100644 index 000000000..c4e596627 --- /dev/null +++ b/ansible_collections/azure/azcollection/MANIFEST.json @@ -0,0 +1,38 @@ +{ + "collection_info": { + "namespace": "azure", + "name": "azcollection", + "version": "1.16.0", + "authors": [ + "Microsoft " + ], + "readme": "README.md", + "tags": [ + "cloud", + "linux", + "networking", + "storage", + "security", + "database", + "application" + ], + "description": "The Azure collection.", + "license": [ + "GPL-2.0-or-later" + ], + "license_file": null, + "dependencies": {}, + "repository": "https://github.com/ansible-collections/azure", + "documentation": null, + "homepage": "https://azure.microsoft.com", + "issues": "https://github.com/ansible-collections/azure/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e29f531bba6f16e3cfdddb6fa4011507294c1525d9f80f4e68803527f7478fe8", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/README.md b/ansible_collections/azure/azcollection/README.md new file mode 100644 index 000000000..b05c76b1c --- /dev/null +++ b/ansible_collections/azure/azcollection/README.md @@ -0,0 +1,97 @@ +# Ansible collection for Azure +[![Doc](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules.html#azure) +[![Code of conduct](https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg)](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) +[![License](https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg)](LICENSE) + +This collection provides a series of Ansible modules and plugins for interacting with the [Azure](https://azure.microsoft.com). 
+ +Documentation of individual modules is [available in the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/azure/azcollection/index.html#plugins-in-azure-azcollection) + +## Installation + +It is recommended to run ansible in [Virtualenv](https://virtualenv.pypa.io/en/latest/) + +## Requirements + +- ansible version >= 2.9 + +To install Azure collection hosted in Galaxy: + +```bash +ansible-galaxy collection install azure.azcollection +``` + +Install dependencies required by the collection (adjust path to collection if necessary): + +```bash +pip3 install -r ~/.ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt +``` + +To upgrade to the latest version of Azure collection: + +```bash +ansible-galaxy collection install azure.azcollection --force +``` + +## Usage + +### Playbooks + +To use a module from Azure collection, please reference the full namespace, collection name, and modules name that you want to use: + +```yaml +--- +- name: Using Azure collection + hosts: localhost + tasks: + - azure.azcollection.azure_rm_storageaccount: + resource_group: myResourceGroup + name: mystorageaccount + account_type: Standard_LRS +``` + +Or you can add full namepsace and collection name in the `collections` element: + +```yaml +--- +- name: Using Azure collection + hosts: localhost + collections: + - azure.azcollection + tasks: + - azure_rm_storageaccount: + resource_group: myResourceGroup + name: mystorageaccount + account_type: Standard_LRS +``` + +### Roles + +For existing Ansible roles, please also reference the full namespace, collection name, and modules name which used in tasks instead of just modules name. 
+ +### Plugins + +To use a plugin from Azure collection, please reference the full namespace, collection name, and plugins name that you want to use: + +```yaml +--- +plugin: azure.azcollection.azure_rm +include_vm_resource_groups: + - ansible-inventory-test-rg +auth_source: auto +``` + +## Contributing + +There are many ways in which you can participate in the project, for example: + +- Submit bugs and feature requests, and help us verify as they are checked in +- Review source code changes +- Review the documentation and make pull requests for anything from typos to new content +- If you are interested in fixing issues and contributing directly to the code base, please see the [CONTRIBUTING](CONTRIBUTING.md) document + +## License + +GNU General Public License v3.0 + +See [LICENSE](LICENSE) to see the full text. diff --git a/ansible_collections/azure/azcollection/ansible.cfg b/ansible_collections/azure/azcollection/ansible.cfg new file mode 100644 index 000000000..fecf5b666 --- /dev/null +++ b/ansible_collections/azure/azcollection/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +collections_paths = ./ diff --git a/ansible_collections/azure/azcollection/azure-pipelines.yml b/ansible_collections/azure/azcollection/azure-pipelines.yml new file mode 100644 index 000000000..406dc2575 --- /dev/null +++ b/ansible_collections/azure/azcollection/azure-pipelines.yml @@ -0,0 +1,45 @@ +trigger: + batch: true + branches: + include: + - "*" + +jobs: +- job: CG + pool: + name: pool-ubuntu-2004 + + steps: + - task: ComponentGovernanceComponentDetection@0 + inputs: + scanType: 'Register' + verbosity: 'Verbose' + alertWarningLevel: 'Medium' + failOnAlert: true + +- job: CredScan + pool: + vmImage: "windows-2019" + + steps: + - task: ea576cd4-c61f-48f8-97e7-a3cb07b90a6f@2 + inputs: + toolMajorVersion: 'V2' + suppressionsFile: 'CredScanSuppressions.json' + + - task: securedevelopmentteam.vss-secure-development-tools.build-task-postanalysis.PostAnalysis@1 + inputs: + AllTools: false + 
APIScan: false + BinSkim: false + CodesignValidation: false + CredScan: true + FortifySCA: false + FxCop: false + ModernCop: false + PoliCheck: false + RoslynAnalyzers: false + SDLNativeRules: false + Semmle: false + TSLint: false + ToolLogsNotFoundAction: 'Standard' diff --git a/ansible_collections/azure/azcollection/meta/execution-environment.yml b/ansible_collections/azure/azcollection/meta/execution-environment.yml new file mode 100644 index 000000000..08fbf66a3 --- /dev/null +++ b/ansible_collections/azure/azcollection/meta/execution-environment.yml @@ -0,0 +1,3 @@ +dependencies: + python: requirements-azure.txt +version: 1 diff --git a/ansible_collections/azure/azcollection/meta/runtime.yml b/ansible_collections/azure/azcollection/meta/runtime.yml new file mode 100644 index 000000000..0d9c5c166 --- /dev/null +++ b/ansible_collections/azure/azcollection/meta/runtime.yml @@ -0,0 +1,282 @@ +--- +requires_ansible: '>=2.9.10' +action_groups: + all: + - azure.azcollection.azure_rm_account_info + - azure.azcollection.azure_rm_adapplication + - azure.azcollection.azure_rm_adapplication_info + - azure.azcollection.azure_rm_adgroup + - azure.azcollection.azure_rm_adgroup_info + - azure.azcollection.azure_rm_adpassword + - azure.azcollection.azure_rm_adpassword_info + - azure.azcollection.azure_rm_adserviceprincipal + - azure.azcollection.azure_rm_adserviceprincipal_info + - azure.azcollection.azure_rm_aduser + - azure.azcollection.azure_rm_aduser_info + - azure.azcollection.azure_rm_aks + - azure.azcollection.azure_rm_aks_info + - azure.azcollection.azure_rm_aksagentpool + - azure.azcollection.azure_rm_aksagentpool_info + - azure.azcollection.azure_rm_aksagentpoolversion_info + - azure.azcollection.azure_rm_aksupgrade_info + - azure.azcollection.azure_rm_aksversion_info + - azure.azcollection.azure_rm_apimanagement + - azure.azcollection.azure_rm_apimanagement_info + - azure.azcollection.azure_rm_apimanagementservice + - 
azure.azcollection.azure_rm_apimanagementservice_info + - azure.azcollection.azure_rm_appgateway + - azure.azcollection.azure_rm_appgateway_info + - azure.azcollection.azure_rm_applicationsecuritygroup + - azure.azcollection.azure_rm_applicationsecuritygroup_info + - azure.azcollection.azure_rm_appserviceplan + - azure.azcollection.azure_rm_appserviceplan_info + - azure.azcollection.azure_rm_automationaccount + - azure.azcollection.azure_rm_automationaccount_info + - azure.azcollection.azure_rm_automationrunbook + - azure.azcollection.azure_rm_automationrunbook_info + - azure.azcollection.azure_rm_autoscale + - azure.azcollection.azure_rm_autoscale_info + - azure.azcollection.azure_rm_availabilityset + - azure.azcollection.azure_rm_availabilityset_info + - azure.azcollection.azure_rm_azurefirewall + - azure.azcollection.azure_rm_azurefirewall_info + - azure.azcollection.azure_rm_backupazurevm + - azure.azcollection.azure_rm_backupazurevm_info + - azure.azcollection.azure_rm_backuppolicy + - azure.azcollection.azure_rm_backuppolicy_info + - azure.azcollection.azure_rm_bastionhost + - azure.azcollection.azure_rm_bastionhost_info + - azure.azcollection.azure_rm_batchaccount + - azure.azcollection.azure_rm_cdnendpoint + - azure.azcollection.azure_rm_cdnendpoint_info + - azure.azcollection.azure_rm_cdnprofile + - azure.azcollection.azure_rm_cdnprofile_info + - azure.azcollection.azure_rm_cognitivesearch + - azure.azcollection.azure_rm_cognitivesearch_info + - azure.azcollection.azure_rm_containerinstance + - azure.azcollection.azure_rm_containerinstance_info + - azure.azcollection.azure_rm_containerregistry + - azure.azcollection.azure_rm_containerregistry_info + - azure.azcollection.azure_rm_containerregistryreplication + - azure.azcollection.azure_rm_containerregistryreplication_info + - azure.azcollection.azure_rm_containerregistrytag + - azure.azcollection.azure_rm_containerregistrytag_info + - azure.azcollection.azure_rm_containerregistrywebhook + - 
azure.azcollection.azure_rm_containerregistrywebhook_info + - azure.azcollection.azure_rm_cosmosdbaccount + - azure.azcollection.azure_rm_cosmosdbaccount_info + - azure.azcollection.azure_rm_datafactory + - azure.azcollection.azure_rm_datafactory_info + - azure.azcollection.azure_rm_datalakestore + - azure.azcollection.azure_rm_datalakestore_info + - azure.azcollection.azure_rm_ddosprotectionplan + - azure.azcollection.azure_rm_ddosprotectionplan_info + - azure.azcollection.azure_rm_deployment + - azure.azcollection.azure_rm_deployment_info + - azure.azcollection.azure_rm_devtestlab + - azure.azcollection.azure_rm_devtestlab_info + - azure.azcollection.azure_rm_devtestlabarmtemplate_info + - azure.azcollection.azure_rm_devtestlabartifact_info + - azure.azcollection.azure_rm_devtestlabartifactsource + - azure.azcollection.azure_rm_devtestlabartifactsource_info + - azure.azcollection.azure_rm_devtestlabcustomimage + - azure.azcollection.azure_rm_devtestlabcustomimage_info + - azure.azcollection.azure_rm_devtestlabenvironment + - azure.azcollection.azure_rm_devtestlabenvironment_info + - azure.azcollection.azure_rm_devtestlabpolicy + - azure.azcollection.azure_rm_devtestlabpolicy_info + - azure.azcollection.azure_rm_devtestlabschedule + - azure.azcollection.azure_rm_devtestlabschedule_info + - azure.azcollection.azure_rm_devtestlabvirtualmachine + - azure.azcollection.azure_rm_devtestlabvirtualmachine_info + - azure.azcollection.azure_rm_devtestlabvirtualnetwork + - azure.azcollection.azure_rm_devtestlabvirtualnetwork_info + - azure.azcollection.azure_rm_diskencryptionset + - azure.azcollection.azure_rm_diskencryptionset_info + - azure.azcollection.azure_rm_dnsrecordset + - azure.azcollection.azure_rm_dnsrecordset_info + - azure.azcollection.azure_rm_dnszone + - azure.azcollection.azure_rm_dnszone_info + - azure.azcollection.azure_rm_eventhub + - azure.azcollection.azure_rm_eventhub_info + - azure.azcollection.azure_rm_expressroute + - 
azure.azcollection.azure_rm_expressroute_info + - azure.azcollection.azure_rm_firewallpolicy + - azure.azcollection.azure_rm_firewallpolicy_info + - azure.azcollection.azure_rm_functionapp + - azure.azcollection.azure_rm_functionapp_info + - azure.azcollection.azure_rm_gallery + - azure.azcollection.azure_rm_gallery_info + - azure.azcollection.azure_rm_galleryimage + - azure.azcollection.azure_rm_galleryimage_info + - azure.azcollection.azure_rm_galleryimageversion + - azure.azcollection.azure_rm_galleryimageversion_info + - azure.azcollection.azure_rm_hdinsightcluster + - azure.azcollection.azure_rm_hdinsightcluster_info + - azure.azcollection.azure_rm_hostgroup + - azure.azcollection.azure_rm_hostgroup_info + - azure.azcollection.azure_rm_image + - azure.azcollection.azure_rm_image_info + - azure.azcollection.azure_rm_iotdevice + - azure.azcollection.azure_rm_iotdevice_info + - azure.azcollection.azure_rm_iotdevicemodule + - azure.azcollection.azure_rm_iothub + - azure.azcollection.azure_rm_iothub_info + - azure.azcollection.azure_rm_iothubconsumergroup + - azure.azcollection.azure_rm_ipgroup + - azure.azcollection.azure_rm_ipgroup_info + - azure.azcollection.azure_rm_keyvault + - azure.azcollection.azure_rm_keyvault_info + - azure.azcollection.azure_rm_keyvaultkey + - azure.azcollection.azure_rm_keyvaultkey_info + - azure.azcollection.azure_rm_keyvaultsecret + - azure.azcollection.azure_rm_keyvaultsecret_info + - azure.azcollection.azure_rm_loadbalancer + - azure.azcollection.azure_rm_loadbalancer_info + - azure.azcollection.azure_rm_lock + - azure.azcollection.azure_rm_lock_info + - azure.azcollection.azure_rm_loganalyticsworkspace + - azure.azcollection.azure_rm_loganalyticsworkspace_info + - azure.azcollection.azure_rm_manageddisk + - azure.azcollection.azure_rm_manageddisk_info + - azure.azcollection.azure_rm_managementgroup + - azure.azcollection.azure_rm_managementgroup_info + - azure.azcollection.azure_rm_mariadbconfiguration + - 
azure.azcollection.azure_rm_mariadbconfiguration_info + - azure.azcollection.azure_rm_mariadbdatabase + - azure.azcollection.azure_rm_mariadbdatabase_info + - azure.azcollection.azure_rm_mariadbfirewallrule + - azure.azcollection.azure_rm_mariadbfirewallrule_info + - azure.azcollection.azure_rm_mariadbserver + - azure.azcollection.azure_rm_mariadbserver_info + - azure.azcollection.azure_rm_monitordiagnosticsetting + - azure.azcollection.azure_rm_monitordiagnosticsetting_info + - azure.azcollection.azure_rm_monitorlogprofile + - azure.azcollection.azure_rm_mysqlconfiguration + - azure.azcollection.azure_rm_mysqlconfiguration_info + - azure.azcollection.azure_rm_mysqldatabase + - azure.azcollection.azure_rm_mysqldatabase_info + - azure.azcollection.azure_rm_mysqlfirewallrule + - azure.azcollection.azure_rm_mysqlfirewallrule_info + - azure.azcollection.azure_rm_mysqlserver + - azure.azcollection.azure_rm_mysqlserver_info + - azure.azcollection.azure_rm_natgateway + - azure.azcollection.azure_rm_natgateway_info + - azure.azcollection.azure_rm_networkinterface + - azure.azcollection.azure_rm_networkinterface_info + - azure.azcollection.azure_rm_notificationhub + - azure.azcollection.azure_rm_notificationhub_info + - azure.azcollection.azure_rm_openshiftmanagedcluster + - azure.azcollection.azure_rm_openshiftmanagedcluster_info + - azure.azcollection.azure_rm_postgresqlconfiguration + - azure.azcollection.azure_rm_postgresqlconfiguration_info + - azure.azcollection.azure_rm_postgresqldatabase + - azure.azcollection.azure_rm_postgresqldatabase_info + - azure.azcollection.azure_rm_postgresqlfirewallrule + - azure.azcollection.azure_rm_postgresqlfirewallrule_info + - azure.azcollection.azure_rm_postgresqlserver + - azure.azcollection.azure_rm_postgresqlserver_info + - azure.azcollection.azure_rm_privatednsrecordset + - azure.azcollection.azure_rm_privatednsrecordset_info + - azure.azcollection.azure_rm_privatednszone + - azure.azcollection.azure_rm_privatednszone_info + - 
azure.azcollection.azure_rm_privatednszonelink + - azure.azcollection.azure_rm_privatednszonelink_info + - azure.azcollection.azure_rm_privateendpoint + - azure.azcollection.azure_rm_privateendpoint_info + - azure.azcollection.azure_rm_privateendpointconnection + - azure.azcollection.azure_rm_privateendpointconnection_info + - azure.azcollection.azure_rm_privateendpointdnszonegroup + - azure.azcollection.azure_rm_privateendpointdnszonegroup_info + - azure.azcollection.azure_rm_privatelinkservice + - azure.azcollection.azure_rm_privatelinkservice_info + - azure.azcollection.azure_rm_proximityplacementgroup + - azure.azcollection.azure_rm_proximityplacementgroup_info + - azure.azcollection.azure_rm_publicipaddress + - azure.azcollection.azure_rm_publicipaddress_info + - azure.azcollection.azure_rm_recoveryservicesvault + - azure.azcollection.azure_rm_recoveryservicesvault_info + - azure.azcollection.azure_rm_rediscache + - azure.azcollection.azure_rm_rediscache_info + - azure.azcollection.azure_rm_rediscachefirewallrule + - azure.azcollection.azure_rm_registrationassignment + - azure.azcollection.azure_rm_registrationassignment_info + - azure.azcollection.azure_rm_registrationdefinition + - azure.azcollection.azure_rm_registrationdefinition_info + - azure.azcollection.azure_rm_resource + - azure.azcollection.azure_rm_resource_info + - azure.azcollection.azure_rm_resourcegroup + - azure.azcollection.azure_rm_resourcegroup_info + - azure.azcollection.azure_rm_roleassignment + - azure.azcollection.azure_rm_roleassignment_info + - azure.azcollection.azure_rm_roledefinition + - azure.azcollection.azure_rm_roledefinition_info + - azure.azcollection.azure_rm_route + - azure.azcollection.azure_rm_route_info + - azure.azcollection.azure_rm_routetable + - azure.azcollection.azure_rm_routetable_info + - azure.azcollection.azure_rm_securitygroup + - azure.azcollection.azure_rm_securitygroup_info + - azure.azcollection.azure_rm_servicebus + - 
azure.azcollection.azure_rm_servicebus_info + - azure.azcollection.azure_rm_servicebusqueue + - azure.azcollection.azure_rm_servicebussaspolicy + - azure.azcollection.azure_rm_servicebustopic + - azure.azcollection.azure_rm_servicebustopicsubscription + - azure.azcollection.azure_rm_snapshot + - azure.azcollection.azure_rm_sqldatabase + - azure.azcollection.azure_rm_sqldatabase_info + - azure.azcollection.azure_rm_sqlfirewallrule + - azure.azcollection.azure_rm_sqlfirewallrule_info + - azure.azcollection.azure_rm_sqlserver + - azure.azcollection.azure_rm_sqlserver_info + - azure.azcollection.azure_rm_sqlmanagedinstance + - azure.azcollection.azure_rm_sqlmanagedinstance_info + - azure.azcollection.azure_rm_storageaccount + - azure.azcollection.azure_rm_storageaccount_info + - azure.azcollection.azure_rm_storageblob + - azure.azcollection.azure_rm_storageshare + - azure.azcollection.azure_rm_storageshare_info + - azure.azcollection.azure_rm_subnet + - azure.azcollection.azure_rm_subnet_info + - azure.azcollection.azure_rm_subscription_info + - azure.azcollection.azure_rm_trafficmanager + - azure.azcollection.azure_rm_trafficmanagerendpoint + - azure.azcollection.azure_rm_trafficmanagerendpoint_info + - azure.azcollection.azure_rm_trafficmanagerprofile + - azure.azcollection.azure_rm_trafficmanagerprofile_info + - azure.azcollection.azure_rm_virtualhub + - azure.azcollection.azure_rm_virtualhub_info + - azure.azcollection.azure_rm_virtualhubconnection + - azure.azcollection.azure_rm_virtualhubconnection_info + - azure.azcollection.azure_rm_virtualmachine + - azure.azcollection.azure_rm_virtualmachine_info + - azure.azcollection.azure_rm_virtualmachineextension + - azure.azcollection.azure_rm_virtualmachineextension_info + - azure.azcollection.azure_rm_virtualmachineimage_info + - azure.azcollection.azure_rm_virtualmachinescaleset + - azure.azcollection.azure_rm_virtualmachinescaleset_info + - azure.azcollection.azure_rm_virtualmachinescalesetextension + - 
azure.azcollection.azure_rm_virtualmachinescalesetextension_info + - azure.azcollection.azure_rm_virtualmachinescalesetinstance + - azure.azcollection.azure_rm_virtualmachinescalesetinstance_info + - azure.azcollection.azure_rm_virtualmachinesize_info + - azure.azcollection.azure_rm_virtualnetwork + - azure.azcollection.azure_rm_virtualnetwork_info + - azure.azcollection.azure_rm_virtualnetworkgateway + - azure.azcollection.azure_rm_virtualnetworkpeering + - azure.azcollection.azure_rm_virtualnetworkpeering_info + - azure.azcollection.azure_rm_virtualwan + - azure.azcollection.azure_rm_virtualwan_info + - azure.azcollection.azure_rm_vmbackuppolicy + - azure.azcollection.azure_rm_vmbackuppolicy_info + - azure.azcollection.azure_rm_vpnsite + - azure.azcollection.azure_rm_vpnsite_info + - azure.azcollection.azure_rm_vpnsitelink_info + - azure.azcollection.azure_rm_webapp + - azure.azcollection.azure_rm_webapp_info + - azure.azcollection.azure_rm_webappaccessrestriction + - azure.azcollection.azure_rm_webappaccessrestriction_info + - azure.azcollection.azure_rm_webappslot + - azure.azcollection.azure_rm_webappvnetconnection + - azure.azcollection.azure_rm_webappvnetconnection_info diff --git a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py new file mode 100644 index 000000000..bc382e401 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016 Matt Davis, +# Copyright: (c) 2016 Chris Houseknecht, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Azure doc fragment + DOCUMENTATION = r''' + +options: + ad_user: + description: + - Active Directory username. 
Use when authenticating with an Active Directory user rather than service + principal. + type: str + password: + description: + - Active Directory user password. Use when authenticating with an Active Directory user rather than service + principal. + type: str + profile: + description: + - Security profile found in ~/.azure/credentials file. + type: str + subscription_id: + description: + - Your Azure subscription Id. + type: str + client_id: + description: + - Azure client ID. Use when authenticating with a Service Principal. + type: str + secret: + description: + - Azure client secret. Use when authenticating with a Service Principal. + type: str + tenant: + description: + - Azure tenant ID. Use when authenticating with a Service Principal. + type: str + cloud_environment: + description: + - For cloud environments other than the US public cloud, the environment name (as defined by Azure Python SDK, eg, C(AzureChinaCloud), + C(AzureUSGovernment)), or a metadata discovery endpoint URL (required for Azure Stack). Can also be set via credential file profile or + the C(AZURE_CLOUD_ENVIRONMENT) environment variable. + type: str + default: AzureCloud + version_added: '0.0.1' + adfs_authority_url: + description: + - Azure AD authority url. Use when authenticating with Username/password, and has your own ADFS authority. + type: str + version_added: '0.0.1' + cert_validation_mode: + description: + - Controls the certificate validation behavior for Azure endpoints. By default, all modules will validate the server certificate, but + when an HTTPS proxy is in use, or against Azure Stack, it may be necessary to disable this behavior by passing C(ignore). Can also be + set via credential file profile or the C(AZURE_CERT_VALIDATION) environment variable. + type: str + choices: [ ignore, validate ] + version_added: '0.0.1' + auth_source: + description: + - Controls the source of the credentials to use for authentication. 
+ - Can also be set via the C(ANSIBLE_AZURE_AUTH_SOURCE) environment variable. + - When set to C(auto) (the default) the precedence is module parameters -> C(env) -> C(credential_file) -> C(cli). + - When set to C(env), the credentials will be read from the environment variables + - When set to C(credential_file), it will read the profile from C(~/.azure/credentials). + - When set to C(cli), the credentials will be sources from the Azure CLI profile. C(subscription_id) or the environment variable + C(AZURE_SUBSCRIPTION_ID) can be used to identify the subscription ID if more than one is present otherwise the default + az cli subscription is used. + - When set to C(msi), the host machine must be an azure resource with an enabled MSI extension. C(subscription_id) or the + environment variable C(AZURE_SUBSCRIPTION_ID) can be used to identify the subscription ID if the resource is granted + access to more than one subscription, otherwise the first subscription is chosen. + - The C(msi) was added in Ansible 2.6. + type: str + default: auto + choices: + - auto + - cli + - credential_file + - env + - msi + version_added: '0.0.1' + api_profile: + description: + - Selects an API profile to use when communicating with Azure services. Default value of C(latest) is appropriate for public clouds; + future values will allow use with Azure Stack. + type: str + default: latest + version_added: '0.0.1' + log_path: + description: + - Parent argument. + type: str + log_mode: + description: + - Parent argument. + type: str + x509_certificate_path: + description: + - Path to the X509 certificate used to create the service principal in PEM format. + - The certificate must be appended to the private key. + - Use when authenticating with a Service Principal. + type: path + version_added: '1.14.0' + thumbprint: + description: + - The thumbprint of the private key specified in I(x509_certificate_path). + - Use when authenticating with a Service Principal. 
+ - Required if I(x509_certificate_path) is defined. + type: str + version_added: '1.14.0' +requirements: + - python >= 2.7 + - The host that executes this module must have the azure.azcollection collection installed via galaxy + - All python packages listed in collection's requirements-azure.txt must be installed via pip on the host that executes modules from azure.azcollection + - Full installation instructions may be found https://galaxy.ansible.com/azure/azcollection + +notes: + - For authentication with Azure you can pass parameters, set environment variables, use a profile stored + in ~/.azure/credentials, or log in before you run your tasks or playbook with C(az login). + - Authentication is also possible using a service principal or Active Directory user. + - To authenticate via service principal, pass subscription_id, client_id, secret and tenant or set environment + variables AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID, AZURE_SECRET and AZURE_TENANT. + - To authenticate via Active Directory user, pass ad_user and password, or set AZURE_AD_USER and + AZURE_PASSWORD in the environment. + - "Alternatively, credentials can be stored in ~/.azure/credentials. This is an ini file containing + a [default] section and the following keys: subscription_id, client_id, secret and tenant or + subscription_id, ad_user and password. It is also possible to add additional profiles. Specify the profile + by passing profile or setting AZURE_PROFILE in the environment." + +seealso: + - name: Sign in with Azure CLI + link: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest + description: How to authenticate using the C(az login) command. 
+ ''' diff --git a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure_rm.py b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure_rm.py new file mode 100644 index 000000000..8d860d863 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure_rm.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016 Matt Davis, +# Copyright: (c) 2016 Chris Houseknecht, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Azure doc fragment + DOCUMENTATION = r''' +options: + plugin: + description: marks this as an instance of the 'azure_rm' plugin + required: true + choices: ['azure_rm', 'azure.azcollection.azure_rm'] + include_vm_resource_groups: + description: A list of resource group names to search for virtual machines. '\*' will include all resource + groups in the subscription. Can also be set comma separated resource group names via the + C(ANSIBLE_AZURE_VM_RESOURCE_GROUPS) environment variable. + default: ['*'] + include_vmss_resource_groups: + description: A list of resource group names to search for virtual machine scale sets (VMSSs). '\*' will + include all resource groups in the subscription. + default: [] + fail_on_template_errors: + description: When false, template failures during group and filter processing are silently ignored (eg, + if a filter or group expression refers to an undefined host variable) + choices: [True, False] + default: True + keyed_groups: + description: Creates groups based on the value of a host variable. Requires a list of dictionaries, + defining C(key) (the source dictionary-typed variable), C(prefix) (the prefix to use for the new group + name), and optionally C(separator) (which defaults to C(_)) + conditional_groups: + description: A mapping of group names to Jinja2 expressions. 
When the mapped expression is true, the host + is added to the named group. + hostvar_expressions: + description: A mapping of hostvar names to Jinja2 expressions. The value for each host is the result of the + Jinja2 expression (which may refer to any of the host's existing variables at the time this inventory + plugin runs). + exclude_host_filters: + description: Excludes hosts from the inventory with a list of Jinja2 conditional expressions. Each + expression in the list is evaluated for each host; when the expression is true, the host is excluded + from the inventory. + default: [] + batch_fetch: + description: To improve performance, results are fetched using an unsupported batch API. Disabling + C(batch_fetch) uses a much slower serial fetch, resulting in many more round-trips. Generally only + useful for troubleshooting. + default: true + default_host_filters: + description: A default set of filters that is applied in addition to the conditions in + C(exclude_host_filters) to exclude powered-off and not-fully-provisioned hosts. Set this to a different + value or empty list if you need to include hosts in these states. + default: ['powerstate != "running"', 'provisioning_state != "succeeded"'] + use_contrib_script_compatible_sanitization: + description: + - By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible. + This option allows you to override that, in efforts to allow migration from the old inventory script and + matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``. + To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups, + you will need to replace hyphens with underscores via the regex_replace filter for those entries. + - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting, + otherwise the core engine will just use the standard sanitization on top. 
+ - This is not the default as such names break certain functionality as not all characters are valid Python identifiers + which group names end up being used as. + type: bool + default: False + version_added: '0.0.1' + plain_host_names: + description: + - By default this plugin will use globally unique host names. + This option allows you to override that, and use the name that matches the old inventory script naming. + - This is not the default, as these names are not truly unique, and can conflict with other hosts. + The default behavior will add extra hashing to the end of the hostname to prevent such conflicts. + type: bool + default: False + version_added: '0.0.1' + hostnames: + description: + - A list of Jinja2 expressions in order of precedence to compose inventory_hostname. + - Ignores expression if result is an empty string or None value. + - By default, inventory_hostname is generated to be globally unique based on the VM host name. + See C(plain_host_names) for more details on the default. + - An expression of 'default' will force using the default hostname generator if no previous hostname expression + resulted in a valid hostname. + - Use ``default_inventory_hostname`` to access the default hostname generator's value in any of the Jinja2 expressions. 
+ type: list + elements: str + default: [default] +''' diff --git a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure_tags.py b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure_tags.py new file mode 100644 index 000000000..8edb80eed --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure_tags.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Matt Davis, +# Copyright: (c) 2016, Chris Houseknecht, +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Azure doc fragment + DOCUMENTATION = r''' +options: + tags: + description: + - Dictionary of string:string pairs to assign as metadata to the object. + - Metadata tags on the object will be updated with any provided values. + - To remove tags set append_tags option to false. + - Currently, Azure DNS zones and Traffic Manager services also don't allow the use of spaces in the tag. + - Azure Front Door doesn't support the use of # in the tag name. + - Azure Automation and Azure CDN only support 15 tags on resources. + type: dict + append_tags: + description: + - Use to control if tags field is canonical or just appends to existing tags. + - When canonical, any tags not found in the tags parameter will be removed from the object's metadata. 
+ type: bool + default: yes + ''' diff --git a/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py b/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py new file mode 100644 index 000000000..e570b59a9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py @@ -0,0 +1,656 @@ +# Copyright (c) 2018 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = r''' + name: azure_rm + short_description: Azure Resource Manager inventory plugin + extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_rm + - constructed + description: + - Query VM details from Azure Resource Manager + - Requires a YAML configuration file whose name ends with 'azure_rm.(yml|yaml)' + - By default, sets C(ansible_host) to the first public IP address found (preferring the primary NIC). If no + public IPs are found, the first private IP (also preferring the primary NIC). The default may be overridden + via C(hostvar_expressions); see examples. 
+''' + +EXAMPLES = ''' +# The following host variables are always available: +# public_ipv4_addresses: all public IP addresses, with the primary IP config from the primary NIC first +# public_dns_hostnames: all public DNS hostnames, with the primary IP config from the primary NIC first +# private_ipv4_addresses: all private IP addressses, with the primary IP config from the primary NIC first +# id: the VM's Azure resource ID, eg /subscriptions/00000000-0000-0000-1111-1111aaaabb/resourceGroups/my_rg/providers/Microsoft.Compute/virtualMachines/my_vm +# location: the VM's Azure location, eg 'westus', 'eastus' +# name: the VM's resource name, eg 'myvm' +# os_profile: The VM OS properties, a dictionary, only system is currently available, eg 'os_profile.system not in ['linux']' +# powerstate: the VM's current power state, eg: 'running', 'stopped', 'deallocated' +# provisioning_state: the VM's current provisioning state, eg: 'succeeded' +# tags: dictionary of the VM's defined tag values +# resource_type: the VM's resource type, eg: 'Microsoft.Compute/virtualMachine', 'Microsoft.Compute/virtualMachineScaleSets/virtualMachines' +# vmid: the VM's internal SMBIOS ID, eg: '36bca69d-c365-4584-8c06-a62f4a1dc5d2' +# vmss: if the VM is a member of a scaleset (vmss), a dictionary including the id and name of the parent scaleset +# availability_zone: availability zone in which VM is deployed, eg '1','2','3' +# +# The following host variables are sometimes availble: +# computer_name: the Operating System's hostname. Will not be available if azure agent is not available and picking it up. 
+ + +# sample 'myazuresub.azure_rm.yaml' + +# required for all azure_rm inventory plugin configs +plugin: azure.azcollection.azure_rm + +# forces this plugin to use a CLI auth session instead of the automatic auth source selection (eg, prevents the +# presence of 'ANSIBLE_AZURE_RM_X' environment variables from overriding CLI auth) +auth_source: cli + +# fetches VMs from an explicit list of resource groups instead of default all (- '*') +include_vm_resource_groups: +- myrg1 +- myrg2 + +# fetches VMs from VMSSs in all resource groups (defaults to no VMSS fetch) +include_vmss_resource_groups: +- '*' + +# places a host in the named group if the associated condition evaluates to true +conditional_groups: + # since this will be true for every host, every host sourced from this inventory plugin config will be in the + # group 'all_the_hosts' + all_the_hosts: true + # if the VM's "name" variable contains "dbserver", it will be placed in the 'db_hosts' group + db_hosts: "'dbserver' in name" + +# adds variables to each host found by this inventory plugin, whose values are the result of the associated expression +hostvar_expressions: + my_host_var: + # A statically-valued expression has to be both single and double-quoted, or use escaped quotes, since the outer + # layer of quotes will be consumed by YAML. Without the second set of quotes, it interprets 'staticvalue' as a + # variable instead of a string literal. + some_statically_valued_var: "'staticvalue'" + # overrides the default ansible_host value with a custom Jinja2 expression, in this case, the first DNS hostname, or + # if none are found, the first public IP address. + ansible_host: (public_dns_hostnames + public_ipv4_addresses) | first + +# change how inventory_hostname is generated. Each item is a jinja2 expression similar to hostvar_expressions. +hostnames: + - tags.vm_name + - default # special var that uses the default hashed name + +# places hosts in dynamically-created groups based on a variable value. 
+keyed_groups: +# places each host in a group named 'tag_(tag name)_(tag value)' for each tag on a VM. +- prefix: tag + key: tags +# places each host in a group named 'azure_loc_(location name)', depending on the VM's location +- prefix: azure_loc + key: location +# places host in a group named 'some_tag_X' using the value of the 'sometag' tag on a VM as X, and defaulting to the +# value 'none' (eg, the group 'some_tag_none') if the 'sometag' tag is not defined for a VM. +- prefix: some_tag + key: tags.sometag | default('none') + +# excludes a host from the inventory when any of these expressions is true, can refer to any vars defined on the host +exclude_host_filters: +# excludes hosts in the eastus region +- location in ['eastus'] +- tags['tagkey'] is defined and tags['tagkey'] == 'tagkey' +- tags['tagkey2'] is defined and tags['tagkey2'] == 'tagkey2' +# excludes hosts that are powered off +- powerstate != 'running' +''' + +# FUTURE: do we need a set of sane default filters, separate from the user-defineable ones? 
+# eg, powerstate==running, provisioning_state==succeeded + + +import hashlib +import json +import re +import uuid +import os + +try: + from queue import Queue, Empty +except ImportError: + from Queue import Queue, Empty + +from collections import namedtuple +from ansible import release +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.module_utils.six import iteritems +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMAuth +from ansible.errors import AnsibleParserError, AnsibleError +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_native, to_bytes, to_text +from itertools import chain + +try: + from msrest import ServiceClient, Serializer, Deserializer + from msrestazure import AzureConfiguration + from msrestazure.polling.arm_polling import ARMPolling + from msrestazure.tools import parse_resource_id +except ImportError: + AzureConfiguration = object + ARMPolling = object + parse_resource_id = object + ServiceClient = object + Serializer = object + Deserializer = object + pass + + +class AzureRMRestConfiguration(AzureConfiguration): + def __init__(self, credentials, subscription_id, base_url=None): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if not base_url: + base_url = 'https://management.azure.com' + + super(AzureRMRestConfiguration, self).__init__(base_url) + + self.add_user_agent('ansible-dynamic-inventory/{0}'.format(release.__version__)) + + self.credentials = credentials + self.subscription_id = subscription_id + + +UrlAction = namedtuple('UrlAction', ['url', 'api_version', 'handler', 'handler_args']) + + +# FUTURE: add Cacheable support once we have a sane serialization format +class InventoryModule(BaseInventoryPlugin, Constructable): + + NAME = 
'azure.azcollection.azure_rm' + + def __init__(self): + super(InventoryModule, self).__init__() + + self._serializer = Serializer() + self._deserializer = Deserializer() + self._hosts = [] + self._filters = None + + # FUTURE: use API profiles with defaults + self._compute_api_version = '2017-03-30' + self._network_api_version = '2015-06-15' + + self._default_header_parameters = {'Content-Type': 'application/json; charset=utf-8'} + + self._request_queue = Queue() + + self.azure_auth = None + + self._batch_fetch = False + + def verify_file(self, path): + ''' + :param loader: an ansible.parsing.dataloader.DataLoader object + :param path: the path to the inventory config file + :return the contents of the config file + ''' + if super(InventoryModule, self).verify_file(path): + if re.match(r'.{0,}azure_rm\.y(a)?ml$', path): + return True + # display.debug("azure_rm inventory filename must end with 'azure_rm.yml' or 'azure_rm.yaml'") + return False + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + + if self.get_option('use_contrib_script_compatible_sanitization'): + self._sanitize_group_name = self._legacy_script_compatible_group_sanitization + + self._batch_fetch = self.get_option('batch_fetch') + + self._legacy_hostnames = self.get_option('plain_host_names') + + self._filters = self.get_option('exclude_host_filters') + self.get_option('default_host_filters') + + try: + self._credential_setup() + self._get_hosts() + except Exception: + raise + + def _credential_setup(self): + auth_options = dict( + auth_source=self.get_option('auth_source'), + profile=self.get_option('profile'), + subscription_id=self.get_option('subscription_id'), + client_id=self.get_option('client_id'), + secret=self.get_option('secret'), + tenant=self.get_option('tenant'), + ad_user=self.get_option('ad_user'), + password=self.get_option('password'), + 
cloud_environment=self.get_option('cloud_environment'), + cert_validation_mode=self.get_option('cert_validation_mode'), + api_profile=self.get_option('api_profile'), + adfs_authority_url=self.get_option('adfs_authority_url') + ) + + self.azure_auth = AzureRMAuth(**auth_options) + + self._clientconfig = AzureRMRestConfiguration(self.azure_auth.azure_credentials, self.azure_auth.subscription_id, + self.azure_auth._cloud_environment.endpoints.resource_manager) + self._client = ServiceClient(self._clientconfig.credentials, self._clientconfig) + + def _enqueue_get(self, url, api_version, handler, handler_args=None): + if not handler_args: + handler_args = {} + self._request_queue.put_nowait(UrlAction(url=url, api_version=api_version, handler=handler, handler_args=handler_args)) + + def _enqueue_vm_list(self, rg='*'): + if not rg or rg == '*': + url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines' + else: + url = '/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachines' + + url = url.format(subscriptionId=self._clientconfig.subscription_id, rg=rg) + self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vm_page_response) + + def _enqueue_vmss_list(self, rg=None): + if not rg or rg == '*': + url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets' + else: + url = '/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachineScaleSets' + + url = url.format(subscriptionId=self._clientconfig.subscription_id, rg=rg) + self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vmss_page_response) + + def _get_hosts(self): + if os.environ.get('ANSIBLE_AZURE_VM_RESOURCE_GROUPS'): + for vm_rg in os.environ['ANSIBLE_AZURE_VM_RESOURCE_GROUPS'].split(","): + self._enqueue_vm_list(vm_rg) + else: + for vm_rg in self.get_option('include_vm_resource_groups'): + self._enqueue_vm_list(vm_rg) + + for 
vmss_rg in self.get_option('include_vmss_resource_groups'): + self._enqueue_vmss_list(vmss_rg) + + if self._batch_fetch: + self._process_queue_batch() + else: + self._process_queue_serial() + + constructable_config_strict = boolean(self.get_option('fail_on_template_errors')) + if self.get_option('hostvar_expressions') is not None: + constructable_config_compose = self.get_option('hostvar_expressions') + else: + constructable_config_compose = self.get_option('compose') + constructable_config_groups = self.get_option('conditional_groups') + constructable_config_keyed_groups = self.get_option('keyed_groups') + + constructable_hostnames = self.get_option('hostnames') + + for h in self._hosts: + # FUTURE: track hostnames to warn if a hostname is repeated (can happen for legacy and for composed inventory_hostname) + inventory_hostname = self._get_hostname(h, hostnames=constructable_hostnames, strict=constructable_config_strict) + if self._filter_host(inventory_hostname, h.hostvars): + continue + self.inventory.add_host(inventory_hostname) + # FUTURE: configurable default IP list? can already do this via hostvar_expressions + self.inventory.set_variable(inventory_hostname, "ansible_host", + next(chain(h.hostvars['public_ipv4_addresses'], h.hostvars['private_ipv4_addresses']), None)) + for k, v in iteritems(h.hostvars): + # FUTURE: configurable hostvar prefix? Makes docs harder... 
+ self.inventory.set_variable(inventory_hostname, k, v) + + # constructable delegation + self._set_composite_vars(constructable_config_compose, h.hostvars, inventory_hostname, strict=constructable_config_strict) + self._add_host_to_composed_groups(constructable_config_groups, h.hostvars, inventory_hostname, strict=constructable_config_strict) + self._add_host_to_keyed_groups(constructable_config_keyed_groups, h.hostvars, inventory_hostname, strict=constructable_config_strict) + + # FUTURE: fix underlying inventory stuff to allow us to quickly access known groupvars from reconciled host + def _filter_host(self, inventory_hostname, hostvars): + self.templar.available_variables = hostvars + + for condition in self._filters: + # FUTURE: should warn/fail if conditional doesn't return True or False + conditional = "{{% if {0} %}} True {{% else %}} False {{% endif %}}".format(condition) + try: + if boolean(self.templar.template(conditional)): + return True + except Exception as e: + if boolean(self.get_option('fail_on_template_errors')): + raise AnsibleParserError("Error evaluating filter condition '{0}' for host {1}: {2}".format(condition, inventory_hostname, to_native(e))) + continue + + return False + + def _get_hostname(self, host, hostnames=None, strict=False): + hostname = None + errors = [] + + for preference in hostnames: + if preference == 'default': + return host.default_inventory_hostname + try: + hostname = self._compose(preference, host.hostvars) + except Exception as e: # pylint: disable=broad-except + if strict: + raise AnsibleError("Could not compose %s as hostnames - %s" % (preference, to_native(e))) + else: + errors.append( + (preference, str(e)) + ) + if hostname: + return to_text(hostname) + + raise AnsibleError( + 'Could not template any hostname for host, errors for each preference: %s' % ( + ', '.join(['%s: %s' % (pref, err) for pref, err in errors]) + ) + ) + + def _process_queue_serial(self): + try: + while True: + item = 
self._request_queue.get_nowait() + resp = self.send_request(item.url, item.api_version) + item.handler(resp, **item.handler_args) + except Empty: + pass + + def _on_vm_page_response(self, response, vmss=None): + next_link = response.get('nextLink') + + if next_link: + self._enqueue_get(url=next_link, api_version=self._compute_api_version, handler=self._on_vm_page_response) + + if 'value' in response: + for h in response['value']: + # FUTURE: add direct VM filtering by tag here (performance optimization)? + self._hosts.append(AzureHost(h, self, vmss=vmss, legacy_name=self._legacy_hostnames)) + + def _on_vmss_page_response(self, response): + next_link = response.get('nextLink') + + if next_link: + self._enqueue_get(url=next_link, api_version=self._compute_api_version, handler=self._on_vmss_page_response) + + # FUTURE: add direct VMSS filtering by tag here (performance optimization)? + for vmss in response['value']: + url = '{0}/virtualMachines'.format(vmss['id']) + # VMSS instances look close enough to regular VMs that we can share the handler impl... 
+ self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vm_page_response, handler_args=dict(vmss=vmss)) + + # use the undocumented /batch endpoint to bulk-send up to 500 requests in a single round-trip + # + def _process_queue_batch(self): + while True: + batch_requests = [] + batch_item_index = 0 + batch_response_handlers = dict() + try: + while batch_item_index < 100: + item = self._request_queue.get_nowait() + + name = str(uuid.uuid4()) + query_parameters = {'api-version': item.api_version} + req = self._client.get(item.url, query_parameters) + batch_requests.append(dict(httpMethod="GET", url=req.url, name=name)) + batch_response_handlers[name] = item + batch_item_index += 1 + except Empty: + pass + + if not batch_requests: + break + + batch_resp = self._send_batch(batch_requests) + + key_name = None + if 'responses' in batch_resp: + key_name = 'responses' + elif 'value' in batch_resp: + key_name = 'value' + else: + raise AnsibleError("didn't find expected key responses/value in batch response") + + for idx, r in enumerate(batch_resp[key_name]): + status_code = r.get('httpStatusCode') + returned_name = r['name'] + result = batch_response_handlers[returned_name] + if status_code == 200: + # FUTURE: error-tolerant operation mode (eg, permissions) + # FUTURE: store/handle errors from individual handlers + result.handler(r['content'], **result.handler_args) + + def _send_batch(self, batched_requests): + url = '/batch' + query_parameters = {'api-version': '2015-11-01'} + + body_obj = dict(requests=batched_requests) + + body_content = self._serializer.body(body_obj, 'object') + + header = {'x-ms-client-request-id': str(uuid.uuid4())} + header.update(self._default_header_parameters) + + request = self._client.post(url, query_parameters) + initial_response = self._client.send(request, header, body_content) + + # FUTURE: configurable timeout? 
+ poller = ARMPolling(timeout=2) + poller.initialize(client=self._client, + initial_response=initial_response, + deserialization_callback=lambda r: self._deserializer('object', r)) + + poller.run() + + return poller.resource() + + def send_request(self, url, api_version): + query_parameters = {'api-version': api_version} + req = self._client.get(url, query_parameters) + resp = self._client.send(req, self._default_header_parameters, stream=False) + + resp.raise_for_status() + content = resp.content + + return json.loads(content) + + @staticmethod + def _legacy_script_compatible_group_sanitization(name): + + # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python + regex = re.compile(r"[^A-Za-z0-9\_\-]") + + return regex.sub('_', name) + +# VM list (all, N resource groups): VM -> InstanceView, N NICs, N PublicIPAddress) +# VMSS VMs (all SS, N specific SS, N resource groups?): SS -> VM -> InstanceView, N NICs, N PublicIPAddress) + + +class AzureHost(object): + _powerstate_regex = re.compile('^PowerState/(?P.+)$') + + def __init__(self, vm_model, inventory_client, vmss=None, legacy_name=False): + self._inventory_client = inventory_client + self._vm_model = vm_model + self._vmss = vmss + + self._instanceview = None + + self._powerstate = "unknown" + self.nics = [] + + if legacy_name: + self.default_inventory_hostname = vm_model['name'] + else: + # Azure often doesn't provide a globally-unique filename, so use resource name + a chunk of ID hash + self.default_inventory_hostname = '{0}_{1}'.format(vm_model['name'], hashlib.sha1(to_bytes(vm_model['id'])).hexdigest()[0:4]) + + self._hostvars = {} + + inventory_client._enqueue_get(url="{0}/instanceView".format(vm_model['id']), + api_version=self._inventory_client._compute_api_version, + handler=self._on_instanceview_response) + + nic_refs = vm_model['properties']['networkProfile']['networkInterfaces'] + for nic in nic_refs: + # single-nic instances don't set primary, 
so figure it out... + is_primary = nic.get('properties', {}).get('primary', len(nic_refs) == 1) + inventory_client._enqueue_get(url=nic['id'], api_version=self._inventory_client._network_api_version, + handler=self._on_nic_response, + handler_args=dict(is_primary=is_primary)) + + @property + def hostvars(self): + if self._hostvars != {}: + return self._hostvars + + system = "unknown" + if 'osProfile' in self._vm_model['properties']: + if 'linuxConfiguration' in self._vm_model['properties']['osProfile']: + system = 'linux' + if 'windowsConfiguration' in self._vm_model['properties']['osProfile']: + system = 'windows' + else: + osType = self._vm_model['properties']['storageProfile']['osDisk']['osType'] + if osType == 'Linux': + system = 'linux' + if osType == 'Windows': + system = 'windows' + av_zone = None + if 'zones' in self._vm_model: + av_zone = self._vm_model['zones'] + + new_hostvars = dict( + network_interface=[], + mac_address=[], + network_interface_id=[], + security_group_id=[], + security_group=[], + public_ipv4_addresses=[], + public_dns_hostnames=[], + private_ipv4_addresses=[], + id=self._vm_model['id'], + location=self._vm_model['location'], + name=self._vm_model['name'], + computer_name=self._vm_model['properties'].get('osProfile', {}).get('computerName'), + availability_zone=av_zone, + powerstate=self._powerstate, + provisioning_state=self._vm_model['properties']['provisioningState'].lower(), + tags=self._vm_model.get('tags', {}), + resource_type=self._vm_model.get('type', "unknown"), + vmid=self._vm_model['properties']['vmId'], + os_profile=dict( + system=system, + ), + vmss=dict( + id=self._vmss['id'], + name=self._vmss['name'], + ) if self._vmss else {}, + virtual_machine_size=self._vm_model['properties']['hardwareProfile']['vmSize'] if self._vm_model['properties'].get('hardwareProfile') else None, + plan=self._vm_model['properties']['plan']['name'] if self._vm_model['properties'].get('plan') else None, + 
resource_group=parse_resource_id(self._vm_model['id']).get('resource_group').lower(), + default_inventory_hostname=self.default_inventory_hostname, + ) + + # set nic-related values from the primary NIC first + for nic in sorted(self.nics, key=lambda n: n.is_primary, reverse=True): + # and from the primary IP config per NIC first + for ipc in sorted(nic._nic_model['properties']['ipConfigurations'], key=lambda i: i['properties'].get('primary', False), reverse=True): + private_ip = ipc['properties'].get('privateIPAddress') + if private_ip: + new_hostvars['private_ipv4_addresses'].append(private_ip) + pip_id = ipc['properties'].get('publicIPAddress', {}).get('id') + if pip_id: + new_hostvars['public_ip_id'] = pip_id + + pip = nic.public_ips[pip_id] + new_hostvars['public_ip_name'] = pip._pip_model['name'] + new_hostvars['public_ipv4_addresses'].append(pip._pip_model['properties'].get('ipAddress', None)) + pip_fqdn = pip._pip_model['properties'].get('dnsSettings', {}).get('fqdn') + if pip_fqdn: + new_hostvars['public_dns_hostnames'].append(pip_fqdn) + + new_hostvars['mac_address'].append(nic._nic_model['properties'].get('macAddress')) + new_hostvars['network_interface'].append(nic._nic_model['name']) + new_hostvars['network_interface_id'].append(nic._nic_model['id']) + new_hostvars['security_group_id'].append(nic._nic_model['properties']['networkSecurityGroup']['id']) \ + if nic._nic_model['properties'].get('networkSecurityGroup') else None + new_hostvars['security_group'].append(parse_resource_id(nic._nic_model['properties']['networkSecurityGroup']['id'])['resource_name']) \ + if nic._nic_model['properties'].get('networkSecurityGroup') else None + + # set image and os_disk + new_hostvars['image'] = {} + new_hostvars['os_disk'] = {} + new_hostvars['data_disks'] = [] + storageProfile = self._vm_model['properties'].get('storageProfile') + if storageProfile: + imageReference = storageProfile.get('imageReference') + if imageReference: + if imageReference.get('publisher'): + 
new_hostvars['image'] = dict( + sku=imageReference.get('sku'), + publisher=imageReference.get('publisher'), + version=imageReference.get('version'), + offer=imageReference.get('offer') + ) + elif imageReference.get('id'): + new_hostvars['image'] = dict( + id=imageReference.get('id') + ) + + osDisk = storageProfile.get('osDisk') + new_hostvars['os_disk'] = dict( + name=osDisk.get('name'), + operating_system_type=osDisk.get('osType').lower() if osDisk.get('osType') else None, + id=osDisk.get('managedDisk', {}).get('id') + ) + new_hostvars['data_disks'] = [ + dict( + name=dataDisk.get('name'), + lun=dataDisk.get('lun'), + id=dataDisk.get('managedDisk', {}).get('id') + ) for dataDisk in storageProfile.get('dataDisks', []) + ] + + self._hostvars = new_hostvars + return self._hostvars + + def _on_instanceview_response(self, vm_instanceview_model): + self._instanceview = vm_instanceview_model + self._powerstate = next((self._powerstate_regex.match(s.get('code', '')).group('powerstate') + for s in vm_instanceview_model.get('statuses', []) if self._powerstate_regex.match(s.get('code', ''))), 'unknown') + + def _on_nic_response(self, nic_model, is_primary=False): + nic = AzureNic(nic_model=nic_model, inventory_client=self._inventory_client, is_primary=is_primary) + self.nics.append(nic) + + +class AzureNic(object): + def __init__(self, nic_model, inventory_client, is_primary=False): + self._nic_model = nic_model + self.is_primary = is_primary + self._inventory_client = inventory_client + + self.public_ips = {} + + if nic_model.get('properties', {}).get('ipConfigurations'): + for ipc in nic_model['properties']['ipConfigurations']: + pip = ipc['properties'].get('publicIPAddress') + if pip: + self._inventory_client._enqueue_get(url=pip['id'], api_version=self._inventory_client._network_api_version, handler=self._on_pip_response) + + def _on_pip_response(self, pip_model): + self.public_ips[pip_model['id']] = AzurePip(pip_model) + + +class AzurePip(object): + def __init__(self, 
pip_model): + self._pip_model = pip_model diff --git a/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py b/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py new file mode 100644 index 000000000..a3c809fd6 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py @@ -0,0 +1,212 @@ +# Copyright (c) 2022 Hai Cao, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +--- +name: azure_keyvault_secret +author: + - Hai Cao (@tk5eq) +version_added: '1.12.0' +requirements: + - requests + - azure + - msrest +short_description: Read secret from Azure Key Vault. +description: + - This lookup returns the content of secret saved in Azure Key Vault. + - When ansible host is MSI enabled Azure VM, user don't need provide any credential to access to Azure Key Vault. +options: + _terms: + description: Secret name, version can be included like secret_name/secret_version. + required: True + vault_url: + description: Url of Azure Key Vault. + required: True + client_id: + description: Client id of service principal that has access to the Azure Key Vault + secret: + description: Secret of the service principal. + tenant_id: + description: Tenant id of service principal. + subscription_id: + description: Your Azure subscription Id. +notes: + - If version is not provided, this plugin will return the latest version of the secret. + - If ansible is running on Azure Virtual Machine with MSI enabled, client_id, secret and tenant isn't required. + - For enabling MSI on Azure VM, please refer to this doc https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/ + - After enabling MSI on Azure VM, remember to grant access of the Key Vault to the VM by adding a new Acess Policy in Azure Portal. 
+ - If MSI is not enabled on ansible host, it's required to provide a valid service principal which has access to the key vault. + - To use a plugin from a collection, please reference the full namespace, collection name, and lookup plugin name that you want to use. +""" + +EXAMPLE = """ +- name: Look up secret when azure cli login + debug: + msg: msg: "{{ lookup('azure.azcollection.azure_keyvault_secret', 'testsecret', vault_url=key_vault_uri, subscription_id=subscription_id)}}" +- name: Look up secret when ansible host is MSI enabled Azure VM + debug: + msg: "the value of this secret is {{ + lookup( + 'azure.azcollection.azure_keyvault_secret', + 'testSecret/version', + vault_url='https://yourvault.vault.azure.net' + ) + }}" + +- name: Look up secret when ansible host is general VM + vars: + url: 'https://yourvault.vault.azure.net' + secretname: 'testSecret/version' + client_id: '123456789' + secret: 'abcdefg' + tenant: 'uvwxyz' + debug: + msg: "the value of this secret is {{ + lookup( + 'azure.azcollection.azure_keyvault_secret', + secretname, + vault_url=url, + client_id=client_id, + secret=secret, + tenant_id=tenant + ) + }}" + +# Example below creates an Azure Virtual Machine with SSH public key from key vault using 'azure_keyvault_secret' lookup plugin. 
+- name: Create Azure VM + hosts: localhost + connection: local + no_log: True + vars: + resource_group: myResourceGroup + vm_name: testvm + location: eastus + ssh_key: "{{ lookup('azure.azcollection.azure_keyvault_secret','myssh_key') }}" + - name: Create VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_DS1_v2 + admin_username: azureuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/azureuser/.ssh/authorized_keys + key_data: "{{ ssh_key }}" + network_interfaces: "{{ vm_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest +""" + +RETURN = """ + _raw: + description: secret content string +""" + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display +try: + import requests + import logging + import os + from azure.common.credentials import ServicePrincipalCredentials, get_cli_profile + from azure.keyvault import KeyVaultClient + from msrest.exceptions import AuthenticationError, ClientRequestError + from azure.keyvault.models.key_vault_error import KeyVaultErrorException +except ImportError: + pass + +display = Display() + +TOKEN_ACQUIRED = False + +token_params = { + 'api-version': '2018-02-01', + 'resource': 'https://vault.azure.net' +} + +token_headers = { + 'Metadata': 'true' +} + +token = None + +try: + token_res = requests.get('http://169.254.169.254/metadata/identity/oauth2/token', params=token_params, headers=token_headers, timeout=(3.05, 27)) + if token_res.ok: + token = token_res.json().get("access_token") + if token is not None: + TOKEN_ACQUIRED = True + else: + display.v('Successfully called MSI endpoint, but no token was available. Will use service principal if provided.') + else: + display.v("Unable to query MSI endpoint, Error Code %s. 
Will use service principal if provided" % token_res.status_code)
+except Exception:
+ display.v('Unable to fetch MSI token. Will use service principal if provided.')
+ TOKEN_ACQUIRED = False
+
+
+def lookup_secret_non_msi(terms, vault_url, kwargs):
+ """Fetch Key Vault secrets without MSI, using a service principal or the Azure CLI profile.
+
+ :param terms: list of secret names (optionally 'name/version') to look up
+ :param vault_url: base URL of the key vault
+ :param kwargs: may carry client_id/secret/tenant_id/subscription_id; each falls
+ back to the corresponding AZURE_* environment variable when absent
+ :return: list of secret value strings, one per term
+ :raises AnsibleError: on authentication failure, request error, or missing secret
+ """
+ # Silence the azure auth/service-client loggers so credentials are not echoed into module output.
+ logging.getLogger('msrestazure.azure_active_directory').addHandler(logging.NullHandler())
+ logging.getLogger('msrest.service_client').addHandler(logging.NullHandler())
+
+ # Explicit kwargs win; otherwise fall back to the standard AZURE_* environment variables.
+ client_id = kwargs['client_id'] if kwargs.get('client_id') else os.environ.get('AZURE_CLIENT_ID')
+ secret = kwargs['secret'] if kwargs.get('secret') else os.environ.get('AZURE_SECRET')
+ tenant_id = kwargs['tenant_id'] if kwargs.get('tenant_id') else os.environ.get('AZURE_TENANT')
+ subscription_id = kwargs['subscription_id'] if kwargs.get('subscription_id') else os.environ.get('AZURE_SUBSCRIPTION_ID')
+
+ try:
+ if client_id is not None and secret is not None and tenant_id is not None:
+ # Full service-principal triple provided: authenticate directly.
+ credentials = ServicePrincipalCredentials(
+ client_id=client_id,
+ secret=secret,
+ tenant=tenant_id
+ )
+ elif subscription_id is not None:
+ # No service principal: reuse the local Azure CLI login, scoped to the vault resource.
+ profile = get_cli_profile()
+ credentials, subscription_id, tenant = profile.get_login_credentials(
+ subscription_id=subscription_id, resource="https://vault.azure.net")
+ # NOTE(review): if neither branch above runs (no SP triple and no subscription_id),
+ # 'credentials' is unbound here and this raises UnboundLocalError rather than a
+ # clear AnsibleError -- confirm intended behavior.
+ client = KeyVaultClient(credentials)
+ except AuthenticationError:
+ raise AnsibleError('Invalid credentials provided.')
+
+ ret = []
+ for term in terms:
+ try:
+ # Empty string selects the latest version unless the term embeds 'name/version'.
+ secret_val = client.get_secret(vault_url, term, '').value
+ ret.append(secret_val)
+ except ClientRequestError:
+ raise AnsibleError('Error occurred in request')
+ except KeyVaultErrorException:
+ raise AnsibleError('Failed to fetch secret ' + term + '.')
+ return ret
+
+
+class LookupModule(LookupBase):
+ """Lookup plugin entry point: prefers the MSI token probed at import time, else delegates to lookup_secret_non_msi."""
+
+ def run(self, terms, variables, **kwargs):
+ ret = []
+ vault_url = kwargs.pop('vault_url', None)
+ if vault_url is None:
+ raise AnsibleError('Failed to get valid vault url.')
+ if TOKEN_ACQUIRED:
+ # MSI path: call the Key Vault REST API directly with the bearer token.
+ secret_params = {'api-version': '2016-10-01'}
+ secret_headers = {'Authorization':
'Bearer ' + token} + for term in terms: + try: + secret_res = requests.get(vault_url + '/secrets/' + term, params=secret_params, headers=secret_headers) + ret.append(secret_res.json()["value"]) + except KeyError: + raise AnsibleError('Failed to fetch secret ' + term + '.') + except Exception: + raise AnsibleError('Failed to fetch secret: ' + term + ' via MSI endpoint.') + return ret + else: + return lookup_secret_non_msi(terms, vault_url, kwargs) diff --git a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py new file mode 100644 index 000000000..42026fac1 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py @@ -0,0 +1,1845 @@ +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +import os +import re +import types +import copy +import inspect +import traceback +import json + +try: + from azure.graphrbac import GraphRbacManagementClient +except Exception: + pass +from os.path import expanduser + +from ansible.module_utils.basic import \ + AnsibleModule, missing_required_lib, env_fallback + +try: + from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION +except Exception: + ANSIBLE_VERSION = 'unknown' +from ansible.module_utils.six.moves import configparser +import ansible.module_utils.six.moves.urllib.parse as urlparse + +AZURE_COMMON_ARGS = dict( + auth_source=dict( + type='str', + choices=['auto', 'cli', 'env', 'credential_file', 'msi'], + fallback=(env_fallback, ['ANSIBLE_AZURE_AUTH_SOURCE']), + default="auto" + ), + profile=dict(type='str'), + subscription_id=dict(type='str'), + client_id=dict(type='str', no_log=True), + secret=dict(type='str', no_log=True), + tenant=dict(type='str', 
no_log=True), + ad_user=dict(type='str', no_log=True), + password=dict(type='str', no_log=True), + cloud_environment=dict(type='str', default='AzureCloud'), + cert_validation_mode=dict(type='str', choices=['validate', 'ignore']), + api_profile=dict(type='str', default='latest'), + adfs_authority_url=dict(type='str', default=None), + log_mode=dict(type='str', no_log=True), + log_path=dict(type='str', no_log=True), + x509_certificate_path=dict(type='path', no_log=True), + thumbprint=dict(type='str', no_log=True), +) + +AZURE_CREDENTIAL_ENV_MAPPING = dict( + profile='AZURE_PROFILE', + subscription_id='AZURE_SUBSCRIPTION_ID', + client_id='AZURE_CLIENT_ID', + secret='AZURE_SECRET', + tenant='AZURE_TENANT', + ad_user='AZURE_AD_USER', + password='AZURE_PASSWORD', + cloud_environment='AZURE_CLOUD_ENVIRONMENT', + cert_validation_mode='AZURE_CERT_VALIDATION_MODE', + adfs_authority_url='AZURE_ADFS_AUTHORITY_URL', + x509_certificate_path='AZURE_X509_CERTIFICATE_PATH', + thumbprint='AZURE_THUMBPRINT' +) + + +class SDKProfile(object): # pylint: disable=too-few-public-methods + + def __init__(self, default_api_version, profile=None): + """Constructor. + + :param str default_api_version: Default API version if not overridden by a profile. Nullable. + :param profile: A dict operation group name to API version. + :type profile: dict[str, str] + """ + self.profile = profile if profile is not None else {} + self.profile[None] = default_api_version + + @property + def default_api_version(self): + return self.profile[None] + + +# FUTURE: this should come from the SDK or an external location. 
+# For now, we have to copy from azure-cli +AZURE_API_PROFILES = { + 'latest': { + 'AuthorizationManagementClient': '2020-04-01-preview', + 'ContainerInstanceManagementClient': '2018-02-01-preview', + 'ComputeManagementClient': dict( + default_api_version='2018-10-01', + resource_skus='2018-10-01', + disks='2018-06-01', + snapshots='2018-10-01', + virtual_machine_run_commands='2018-10-01' + ), + 'ManagementGroupsClient': '2020-05-01', + 'NetworkManagementClient': '2019-11-01', + 'ResourceManagementClient': '2019-10-01', + 'SearchManagementClient': '2020-08-01', + 'StorageManagementClient': '2021-06-01', + 'SubscriptionClient': '2019-11-01', + 'WebSiteManagementClient': '2021-03-01', + 'PostgreSQLManagementClient': '2017-12-01', + 'MySQLManagementClient': '2017-12-01', + 'MariaDBManagementClient': '2019-03-01', + 'ManagementLockClient': '2016-09-01', + 'DataLakeStoreAccountManagementClient': '2016-11-01', + 'NotificationHubsManagementClient': '2016-03-01', + 'EventHubManagementClient': '2018-05-04' + }, + '2019-03-01-hybrid': { + 'StorageManagementClient': '2017-10-01', + 'NetworkManagementClient': '2017-10-01', + 'ComputeManagementClient': SDKProfile('2017-12-01', { + 'resource_skus': '2017-09-01', + 'disks': '2017-03-30', + 'snapshots': '2017-03-30' + }), + 'ManagementLinkClient': '2016-09-01', + 'ManagementLockClient': '2016-09-01', + 'PolicyClient': '2016-12-01', + 'ResourceManagementClient': '2018-05-01', + 'EventHubManagementClient': '2018-05-04', + 'SubscriptionClient': '2016-06-01', + 'DnsManagementClient': '2016-04-01', + 'KeyVaultManagementClient': '2016-10-01', + 'AuthorizationManagementClient': SDKProfile('2015-07-01', { + 'classic_administrators': '2015-06-01', + 'policy_assignments': '2016-12-01', + 'policy_definitions': '2016-12-01' + }), + 'KeyVaultClient': '2016-10-01', + 'azure.multiapi.storage': '2017-11-09', + 'azure.multiapi.cosmosdb': '2017-04-17' + }, + '2018-03-01-hybrid': { + 'StorageManagementClient': '2016-01-01', + 
'NetworkManagementClient': '2017-10-01', + 'ComputeManagementClient': SDKProfile('2017-03-30'), + 'ManagementLinkClient': '2016-09-01', + 'ManagementLockClient': '2016-09-01', + 'PolicyClient': '2016-12-01', + 'ResourceManagementClient': '2018-02-01', + 'SubscriptionClient': '2016-06-01', + 'DnsManagementClient': '2016-04-01', + 'KeyVaultManagementClient': '2016-10-01', + 'AuthorizationManagementClient': SDKProfile('2015-07-01', { + 'classic_administrators': '2015-06-01' + }), + 'KeyVaultClient': '2016-10-01', + 'azure.multiapi.storage': '2017-04-17', + 'azure.multiapi.cosmosdb': '2017-04-17' + }, + '2017-03-09-profile': { + 'StorageManagementClient': '2016-01-01', + 'NetworkManagementClient': '2015-06-15', + 'ComputeManagementClient': SDKProfile('2016-03-30'), + 'ManagementLinkClient': '2016-09-01', + 'ManagementLockClient': '2015-01-01', + 'PolicyClient': '2015-10-01-preview', + 'ResourceManagementClient': '2016-02-01', + 'SubscriptionClient': '2016-06-01', + 'DnsManagementClient': '2016-04-01', + 'KeyVaultManagementClient': '2016-10-01', + 'AuthorizationManagementClient': SDKProfile('2015-07-01', { + 'classic_administrators': '2015-06-01' + }), + 'KeyVaultClient': '2016-10-01', + 'azure.multiapi.storage': '2015-04-05' + } +} + +AZURE_TAG_ARGS = dict( + tags=dict(type='dict'), + append_tags=dict(type='bool', default=True), +) + +AZURE_COMMON_REQUIRED_IF = [ + ('log_mode', 'file', ['log_path']) +] + +ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION) +CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT' +VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT' + +CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1" + r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))") + +AZURE_SUCCESS_STATE = "Succeeded" +AZURE_FAILED_STATE = "Failed" + +HAS_AZURE = True +HAS_AZURE_EXC = None +HAS_AZURE_CLI_CORE = True +HAS_AZURE_CLI_CORE_EXC = None + +HAS_MSRESTAZURE = True +HAS_MSRESTAZURE_EXC = None + +try: + import 
importlib +except ImportError: + # This passes the sanity import test, but does not provide a user friendly error message. + # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils. + importlib = None + +try: + from packaging.version import Version + HAS_PACKAGING_VERSION = True + HAS_PACKAGING_VERSION_EXC = None +except ImportError: + Version = None + HAS_PACKAGING_VERSION = False + HAS_PACKAGING_VERSION_EXC = traceback.format_exc() + +# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately +try: + from msrest.serialization import Serializer +except ImportError: + HAS_MSRESTAZURE_EXC = traceback.format_exc() + HAS_MSRESTAZURE = False + +try: + from enum import Enum + from msrestazure.azure_active_directory import AADTokenCredentials + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_active_directory import MSIAuthentication + from azure.cli.core.auth.adal_authentication import MSIAuthenticationWrapper + from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id + from msrestazure import azure_cloud + from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials + from azure.mgmt.network import NetworkManagementClient + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.managementgroups import ManagementGroupsAPI as ManagementGroupsClient + from azure.mgmt.resource.subscriptions import SubscriptionClient + from azure.mgmt.storage import StorageManagementClient + from azure.mgmt.compute import ComputeManagementClient + from azure.mgmt.dns import DnsManagementClient + from azure.mgmt.privatedns import PrivateDnsManagementClient + import azure.mgmt.privatedns.models as PrivateDnsModels + from azure.mgmt.monitor import MonitorManagementClient + from azure.mgmt.web import WebSiteManagementClient + from azure.mgmt.containerservice import ContainerServiceClient + from 
azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements + from azure.mgmt.trafficmanager import TrafficManagerManagementClient + from azure.storage.blob import BlobServiceClient + from adal.authentication_context import AuthenticationContext + from azure.mgmt.authorization import AuthorizationManagementClient + from azure.mgmt.sql import SqlManagementClient + from azure.mgmt.servicebus import ServiceBusManagementClient + import azure.mgmt.servicebus.models as ServicebusModel + from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient + from azure.mgmt.rdbms.mysql import MySQLManagementClient + from azure.mgmt.rdbms.mariadb import MariaDBManagementClient + from azure.mgmt.containerregistry import ContainerRegistryManagementClient + from azure.mgmt.containerinstance import ContainerInstanceManagementClient + from azure.mgmt.loganalytics import LogAnalyticsManagementClient + import azure.mgmt.loganalytics.models as LogAnalyticsModels + from azure.mgmt.automation import AutomationClient + import azure.mgmt.automation.models as AutomationModel + from azure.mgmt.iothub import IotHubClient + from azure.mgmt.iothub import models as IoTHubModels + from msrest.service_client import ServiceClient + from msrestazure import AzureConfiguration + from msrest.authentication import Authentication + from azure.mgmt.resource.locks import ManagementLockClient + from azure.mgmt.recoveryservicesbackup import RecoveryServicesBackupClient + import azure.mgmt.recoveryservicesbackup.models as RecoveryServicesBackupModels + from azure.mgmt.search import SearchManagementClient + from azure.mgmt.datalake.store import DataLakeStoreAccountManagementClient + import azure.mgmt.datalake.store.models as DataLakeStoreAccountModel + from azure.mgmt.notificationhubs import NotificationHubsManagementClient + from azure.mgmt.eventhub import EventHubManagementClient + from azure.mgmt.datafactory import DataFactoryManagementClient + import azure.mgmt.datafactory.models as 
DataFactoryModel + from azure.identity._credentials import client_secret, user_password, certificate + +except ImportError as exc: + Authentication = object + HAS_AZURE_EXC = traceback.format_exc() + HAS_AZURE = False + +from base64 import b64encode, b64decode +from hashlib import sha256 +from hmac import HMAC +from time import time + +try: + from urllib import (urlencode, quote_plus) +except ImportError: + from urllib.parse import (urlencode, quote_plus) + +try: + from azure.cli.core.util import CLIError + from azure.common.credentials import get_cli_profile + from azure.common.cloud import get_cli_active_cloud +except ImportError: + HAS_AZURE_CLI_CORE = False + HAS_AZURE_CLI_CORE_EXC = None + CLIError = Exception + + +def azure_id_to_dict(id): + pieces = re.sub(r'^\/', '', id).split('/') + result = {} + index = 0 + while index < len(pieces) - 1: + result[pieces[index]] = pieces[index + 1] + index += 1 + return result + + +def format_resource_id(val, subscription_id, namespace, types, resource_group): + return resource_id(name=val, + resource_group=resource_group, + namespace=namespace, + type=types, + subscription=subscription_id) if not is_valid_resource_id(val) else val + + +def normalize_location_name(name): + return name.replace(' ', '').lower() + + +# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime) +# or generate the requirements files from this so we only have one source of truth to maintain... 
+AZURE_PKG_VERSIONS = { + 'StorageManagementClient': { + 'package_name': 'storage', + 'expected_version': '19.0.0' + }, + 'ComputeManagementClient': { + 'package_name': 'compute', + 'expected_version': '4.4.0' + }, + 'ContainerInstanceManagementClient': { + 'package_name': 'containerinstance', + 'expected_version': '9.0.0' + }, + 'NetworkManagementClient': { + 'package_name': 'network', + 'expected_version': '2.3.0' + }, + 'ResourceManagementClient': { + 'package_name': 'resource', + 'expected_version': '2.1.0' + }, + 'DnsManagementClient': { + 'package_name': 'dns', + 'expected_version': '8.0.0' + }, + 'PrivateDnsManagementClient': { + 'package_name': 'privatedns', + 'expected_version': '1.0.0' + }, + 'WebSiteManagementClient': { + 'package_name': 'web', + 'expected_version': '6.1.0' + }, + 'TrafficManagerManagementClient': { + 'package_name': 'trafficmanager', + 'expected_version': '1.0.0' + }, + 'EventHubManagementClient': { + 'package_name': 'azure-mgmt-eventhub', + 'expected_version': '2.0.0' + }, +} if HAS_AZURE else {} + + +AZURE_MIN_RELEASE = '2.0.0' + + +class AzureRMModuleBase(object): + def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False, + check_invalid_arguments=None, mutually_exclusive=None, required_together=None, + required_one_of=None, add_file_common_args=False, supports_check_mode=False, + required_if=None, supports_tags=True, facts_module=False, skip_exec=False, is_ad_resource=False): + + merged_arg_spec = dict() + merged_arg_spec.update(AZURE_COMMON_ARGS) + if supports_tags: + merged_arg_spec.update(AZURE_TAG_ARGS) + + if derived_arg_spec: + merged_arg_spec.update(derived_arg_spec) + + merged_required_if = list(AZURE_COMMON_REQUIRED_IF) + if required_if: + merged_required_if += required_if + + self.module = AnsibleModule(argument_spec=merged_arg_spec, + bypass_checks=bypass_checks, + no_log=no_log, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + 
add_file_common_args=add_file_common_args, + supports_check_mode=supports_check_mode, + required_if=merged_required_if) + + if not HAS_PACKAGING_VERSION: + self.fail(msg=missing_required_lib('packaging'), + exception=HAS_PACKAGING_VERSION_EXC) + + if not HAS_MSRESTAZURE: + self.fail(msg=missing_required_lib('msrestazure'), + exception=HAS_MSRESTAZURE_EXC) + + if not HAS_AZURE: + self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)), + exception=HAS_AZURE_EXC) + + self._authorization_client = None + self._network_client = None + self._storage_client = None + self._subscription_client = None + self._management_group_client = None + self._resource_client = None + self._compute_client = None + self._image_client = None + self._dns_client = None + self._private_dns_client = None + self._web_client = None + self._marketplace_client = None + self._sql_client = None + self._mysql_client = None + self._mariadb_client = None + self._postgresql_client = None + self._containerregistry_client = None + self._containerinstance_client = None + self._containerservice_client = None + self._managedcluster_client = None + self._traffic_manager_management_client = None + self._monitor_autoscale_settings_client = None + self._monitor_log_profiles_client = None + self._monitor_diagnostic_settings_client = None + self._resource = None + self._log_analytics_client = None + self._servicebus_client = None + self._automation_client = None + self._IoThub_client = None + self._lock_client = None + self._recovery_services_backup_client = None + self._search_client = None + self._datalake_store_client = None + self._datafactory_client = None + self._notification_hub_client = None + self._event_hub_client = None + + self.check_mode = self.module.check_mode + self.api_profile = self.module.params.get('api_profile') + self.facts_module = facts_module + # self.debug = self.module.params.get('debug') + + # delegate auth to AzureRMAuth class (shared with all 
plugin types) + self.azure_auth = AzureRMAuth(fail_impl=self.fail, is_ad_resource=is_ad_resource, **self.module.params) + + # common parameter validation + if self.module.params.get('tags'): + self.validate_tags(self.module.params['tags']) + + if not skip_exec: + res = self.exec_module(**self.module.params) + self.module.exit_json(**res) + + def check_client_version(self, client_type): + # Ensure Azure modules are at least 2.0.0rc5. + package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None) + if package_version is not None: + client_name = package_version.get('package_name') + try: + client_module = importlib.import_module(client_type.__module__) + client_version = client_module.VERSION + except (RuntimeError, AttributeError): + # can't get at the module version for some reason, just fail silently... + return + expected_version = package_version.get('expected_version') + if Version(client_version) < Version(expected_version): + self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try " + "`pip install ansible[azure]`".format(client_name, client_version, expected_version)) + if Version(client_version) != Version(expected_version): + self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try " + "`pip install ansible[azure]`".format(client_name, client_version, expected_version)) + + def exec_module(self, **kwargs): + self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__)) + + def fail(self, msg, **kwargs): + ''' + Shortcut for calling module.fail() + + :param msg: Error message text. 
+ :param kwargs: Any key=value pairs
+ :return: None
+ '''
+ self.module.fail_json(msg=msg, **kwargs)
+
+ def deprecate(self, msg, version=None):
+ # Thin wrapper over AnsibleModule.deprecate so derived modules need not touch self.module.
+ self.module.deprecate(msg, version)
+
+ def log(self, msg, pretty_print=False):
+ # Route through module.debug; pretty_print assumes msg is JSON-serializable.
+ if pretty_print:
+ self.module.debug(json.dumps(msg, indent=4, sort_keys=True))
+ else:
+ self.module.debug(msg)
+
+ def validate_tags(self, tags):
+ '''
+ Check if tags dictionary contains string:string pairs.
+
+ :param tags: dictionary of string:string pairs
+ :return: None
+ '''
+ # Facts (info) modules skip validation: they only filter by tags, never write them.
+ if not self.facts_module:
+ if not isinstance(tags, dict):
+ self.fail("Tags must be a dictionary of string:string values.")
+ for key, value in tags.items():
+ if not isinstance(value, str):
+ self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
+
+ def update_tags(self, tags):
+ '''
+ Call from the module to update metadata tags. Returns tuple
+ with bool indicating if there was a change and dict of new
+ tags to assign to the object.
+
+ :param tags: metadata tags from the object
+ :return: bool, dict
+ '''
+ tags = tags or dict()
+ new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
+ param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()
+ append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True
+ changed = False
+ # check add or update
+ # NOTE(review): truthiness test means an existing tag whose value is '' is
+ # treated as missing and re-set -- confirm empty-string tag values are not expected.
+ for key, value in param_tags.items():
+ if not new_tags.get(key) or new_tags[key] != value:
+ changed = True
+ new_tags[key] = value
+ # check remove
+ # With append_tags=False, any existing tag not named in the params is dropped;
+ # same truthiness caveat applies to param values here.
+ if not append_tags:
+ for key, value in tags.items():
+ if not param_tags.get(key):
+ new_tags.pop(key)
+ changed = True
+ return changed, new_tags
+
+ def has_tags(self, obj_tags, tag_list):
+ '''
+ Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
+ exists in object tags.
+
+ :param obj_tags: dictionary of tags from an Azure object.
+ :param tag_list: list of tag keys or tag key:value pairs + :return: bool + ''' + + if not obj_tags and tag_list: + return False + + if not tag_list: + return True + + matches = 0 + result = False + for tag in tag_list: + tag_key = tag + tag_value = None + if ':' in tag: + tag_key, tag_value = tag.split(':') + if tag_value and obj_tags.get(tag_key) == tag_value: + matches += 1 + elif not tag_value and obj_tags.get(tag_key): + matches += 1 + if matches == len(tag_list): + result = True + return result + + def get_resource_group(self, resource_group): + ''' + Fetch a resource group. + + :param resource_group: name of a resource group + :return: resource group object + ''' + try: + return self.rm_client.resource_groups.get(resource_group) + except CloudError as cloud_error: + self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message)) + except Exception as exc: + self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc))) + + def parse_resource_to_dict(self, resource): + ''' + Return a dict of the give resource, which contains name and resource group. + + :param resource: It can be a resource name, id or a dict contains name and resource group. + ''' + resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource + resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group) + resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id) + return resource_dict + + def serialize_obj(self, obj, class_name, enum_modules=None): + ''' + Return a JSON representation of an Azure object. + + :param obj: Azure object + :param class_name: Name of the object's class + :param enum_modules: List of module names to build enum dependencies from. 
+ :return: serialized result + ''' + enum_modules = [] if enum_modules is None else enum_modules + + dependencies = dict() + if enum_modules: + for module_name in enum_modules: + mod = importlib.import_module(module_name) + for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass): + dependencies[mod_class_name] = mod_class_obj + self.log("dependencies: ") + self.log(str(dependencies)) + serializer = Serializer(classes=dependencies) + return serializer.body(obj, class_name, keep_readonly=True) + + def get_poller_result(self, poller, wait=5): + ''' + Consistent method of waiting on and retrieving results from Azure's long poller + + :param poller Azure poller object + :return object resulting from the original request + ''' + try: + delay = wait + while not poller.done(): + self.log("Waiting for {0} sec".format(delay)) + poller.wait(timeout=delay) + return poller.result() + except Exception as exc: + self.log(str(exc)) + raise + + def get_multiple_pollers_results(self, pollers, wait=0.05): + ''' + Consistent method of waiting on and retrieving results from multiple Azure's long poller + + :param pollers list of Azure poller object + :param wait Period of time to wait for the long running operation to complete. + :return list of object resulting from the original request + ''' + + def _continue_polling(): + return not all(poller.done() for poller in pollers) + + try: + while _continue_polling(): + for poller in pollers: + if poller.done(): + continue + self.log("Waiting for {0} sec".format(wait)) + poller.wait(timeout=wait) + return [poller.result() for poller in pollers] + except Exception as exc: + self.log(str(exc)) + raise + + def check_provisioning_state(self, azure_object, requested_state='present'): + ''' + Check an Azure object's provisioning state. If something did not complete the provisioning + process, then we cannot operate on it. + + :param azure_object An object such as a subnet, storageaccount, etc. 
Must have provisioning_state + and name attributes. + :return None + ''' + + if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \ + hasattr(azure_object, 'name'): + # resource group object fits this model + if isinstance(azure_object.properties.provisioning_state, Enum): + if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \ + requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format( + azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE)) + return + if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \ + requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format( + azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE)) + return + + if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'): + if isinstance(azure_object.provisioning_state, Enum): + if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format( + azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE)) + return + if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent': + self.fail("Error {0} has a provisioning state of {1}. 
Expecting state to be {2}.".format( + azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE)) + + def get_blob_service_client(self, resource_group_name, storage_account_name): + try: + self.log("Getting storage account detail") + account = self.storage_client.storage_accounts.get_properties(resource_group_name=resource_group_name, account_name=storage_account_name) + account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name=resource_group_name, account_name=storage_account_name) + except Exception as exc: + self.fail("Error getting storage account detail for {0}: {1}".format(storage_account_name, str(exc))) + + try: + self.log("Create blob service client") + return BlobServiceClient( + account_url=account.primary_endpoints.blob, + credential=account_keys.keys[0].value, + ) + except Exception as exc: + self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name, str(exc))) + + def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None): + ''' + Create a default public IP address to associate with a network interface. + If a PIP address matching exists, return it. Otherwise, create one. 
+ + :param resource_group: name of an existing resource group + :param location: a valid azure location + :param public_ip_name: base name to assign the public IP address + :param allocation_method: one of 'Static' or 'Dynamic' + :param sku: sku + :return: PIP object + ''' + pip = None + + self.log("Starting create_default_pip {0}".format(public_ip_name)) + self.log("Check to see if public IP {0} exists".format(public_ip_name)) + try: + pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name) + except Exception: + pass + + if pip: + self.log("Public ip {0} found.".format(public_ip_name)) + self.check_provisioning_state(pip) + return pip + + params = self.network_models.PublicIPAddress( + location=location, + public_ip_allocation_method=allocation_method, + sku=sku + ) + self.log('Creating default public IP {0}'.format(public_ip_name)) + try: + poller = self.network_client.public_ip_addresses.begin_create_or_update(resource_group, public_ip_name, params) + except Exception as exc: + self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc))) + + return self.get_poller_result(poller) + + def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports): + ''' + Create a default security group to associate with a network interface. If a security group matching + exists, return it. Otherwise, create one. + + :param resource_group: Resource group name + :param location: azure location name + :param security_group_name: base name to use for the security group + :param os_type: one of 'Windows' or 'Linux'. Determins any default rules added to the security group. + :param ssh_port: for os_type 'Linux' port used in rule allowing SSH access. + :param rdp_port: for os_type 'Windows' port used in rule allowing RDP access. 
+ :return: security_group object + ''' + group = None + + self.log("Create security group {0}".format(security_group_name)) + self.log("Check to see if security group {0} exists".format(security_group_name)) + try: + group = self.network_client.network_security_groups.get(resource_group, security_group_name) + except Exception: + pass + + if group: + self.log("Security group {0} found.".format(security_group_name)) + self.check_provisioning_state(group) + return group + + parameters = self.network_models.NetworkSecurityGroup() + parameters.location = location + + if not open_ports: + # Open default ports based on OS type + if os_type == 'Linux': + # add an inbound SSH rule + parameters.security_rules = [ + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + description='Allow SSH Access', + source_port_range='*', + destination_port_range='22', + priority=100, + name='SSH') + ] + parameters.location = location + else: + # for windows add inbound RDP and WinRM rules + parameters.security_rules = [ + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + description='Allow RDP port 3389', + source_port_range='*', + destination_port_range='3389', + priority=100, + name='RDP01'), + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + destination_address_prefix='*', + access='Allow', + direction='Inbound', + description='Allow WinRM HTTPS port 5986', + source_port_range='*', + destination_port_range='5986', + priority=101, + name='WinRM01'), + ] + else: + # Open custom ports + parameters.security_rules = [] + priority = 100 + for port in open_ports: + priority += 1 + rule_name = "Rule_{0}".format(priority) + parameters.security_rules.append( + self.network_models.SecurityRule(protocol='Tcp', + source_address_prefix='*', + 
destination_address_prefix='*', + access='Allow', + direction='Inbound', + source_port_range='*', + destination_port_range=str(port), + priority=priority, + name=rule_name) + ) + + self.log('Creating default security group {0}'.format(security_group_name)) + try: + poller = self.network_client.network_security_groups.begin_create_or_update(resource_group, + security_group_name, + parameters) + except Exception as exc: + self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc))) + + return self.get_poller_result(poller) + + @staticmethod + def _validation_ignore_callback(session, global_config, local_config, **kwargs): + session.verify = False + + def get_api_profile(self, client_type_name, api_profile_name): + profile_all_clients = AZURE_API_PROFILES.get(api_profile_name) + + if not profile_all_clients: + raise KeyError("unknown Azure API profile: {0}".format(api_profile_name)) + + profile_raw = profile_all_clients.get(client_type_name, None) + + if not profile_raw: + self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name)) + + if isinstance(profile_raw, dict): + if not profile_raw.get('default_api_version'): + raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name)) + return profile_raw + + # wrap basic strings in a dict that just defines the default + return dict(default_api_version=profile_raw) + + def get_graphrbac_client(self, tenant_id): + cred = self.azure_auth.azure_credentials + base_url = self.azure_auth._cloud_environment.endpoints.active_directory_graph_resource_id + client = GraphRbacManagementClient(cred, tenant_id, base_url) + + return client + + def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None, suppress_subscription_id=False, is_track2=False): + self.log('Getting management service client {0}'.format(client_type.__name__)) + self.check_client_version(client_type) + + 
client_argspec = inspect.signature(client_type.__init__) + + if not base_url: + # most things are resource_manager, don't make everyone specify + base_url = self.azure_auth._cloud_environment.endpoints.resource_manager + + # https://github.com/Azure/msrestazure-for-python/pull/169 + # China's base_url doesn't end in a trailing slash, though others do, + # and we need a trailing slash when generating credential_scopes below. + if not base_url.endswith("/"): + base_url += "/" + + mgmt_subscription_id = self.azure_auth.subscription_id + if self.module.params.get('subscription_id'): + mgmt_subscription_id = self.module.params.get('subscription_id') + + # Some management clients do not take a subscription ID as parameters. + if suppress_subscription_id: + if is_track2: + client_kwargs = dict(credential=self.azure_auth.azure_credential_track2, base_url=base_url, credential_scopes=[base_url + ".default"]) + else: + client_kwargs = dict(credentials=self.azure_auth.azure_credentials, base_url=base_url) + else: + if is_track2: + client_kwargs = dict(credential=self.azure_auth.azure_credential_track2, + subscription_id=mgmt_subscription_id, base_url=base_url, credential_scopes=[base_url + ".default"]) + else: + client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=mgmt_subscription_id, base_url=base_url) + + api_profile_dict = {} + + if self.api_profile: + api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile) + + # unversioned clients won't accept profile; only send it if necessary + # clients without a version specified in the profile will use the default + if api_profile_dict and 'profile' in client_argspec.parameters: + client_kwargs['profile'] = api_profile_dict + + # If the client doesn't accept api_version, it's unversioned. 
+ # If it does, favor explicitly-specified api_version, fall back to api_profile + if 'api_version' in client_argspec.parameters: + profile_default_version = api_profile_dict.get('default_api_version', None) + if api_version or profile_default_version: + client_kwargs['api_version'] = api_version or profile_default_version + if 'profile' in client_kwargs: + # remove profile; only pass API version if specified + client_kwargs.pop('profile') + + client = client_type(**client_kwargs) + + # FUTURE: remove this once everything exposes models directly (eg, containerinstance) + try: + getattr(client, "models") + except AttributeError: + def _ansible_get_models(self, *arg, **kwarg): + return self._ansible_models + + setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models) + client.models = types.MethodType(_ansible_get_models, client) + + if not is_track2: + client.config = self.add_user_agent(client.config) + if self.azure_auth._cert_validation_mode == 'ignore': + client.config.session_configuration_callback = self._validation_ignore_callback + else: + if self.azure_auth._cert_validation_mode == 'ignore': + client._config.session_configuration_callback = self._validation_ignore_callback + + return client + + def add_user_agent(self, config): + # Add user agent for Ansible + config.add_user_agent(ANSIBLE_USER_AGENT) + # Add user agent when running from Cloud Shell + if CLOUDSHELL_USER_AGENT_KEY in os.environ: + config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY]) + # Add user agent when running from VSCode extension + if VSCODEEXT_USER_AGENT_KEY in os.environ: + config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY]) + return config + + def generate_sas_token(self, **kwags): + base_url = kwags.get('base_url', None) + expiry = kwags.get('expiry', time() + 3600) + key = kwags.get('key', None) + policy = kwags.get('policy', None) + url = quote_plus(base_url) + ttl = int(expiry) + sign_key = '{0}\n{1}'.format(url, ttl) + signature 
= b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) + result = { + 'sr': url, + 'sig': signature, + 'se': str(ttl), + } + if policy: + result['skn'] = policy + return 'SharedAccessSignature ' + urlencode(result) + + def get_data_svc_client(self, **kwags): + url = kwags.get('base_url', None) + config = AzureConfiguration(base_url='https://{0}'.format(url)) + config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwags)) + config = self.add_user_agent(config) + return ServiceClient(creds=config.credentials, config=config) + + def get_subnet_detail(self, subnet_id): + vnet_detail = subnet_id.split('/Microsoft.Network/virtualNetworks/')[1].split('/subnets/') + return dict( + resource_group=subnet_id.split('resourceGroups/')[1].split('/')[0], + vnet_name=vnet_detail[0], + subnet_name=vnet_detail[1], + ) + + # passthru methods to AzureAuth instance for backcompat + @property + def credentials(self): + return self.azure_auth.credentials + + @property + def _cloud_environment(self): + return self.azure_auth._cloud_environment + + @property + def subscription_id(self): + return self.azure_auth.subscription_id + + @property + def storage_client(self): + self.log('Getting storage client...') + if not self._storage_client: + self._storage_client = self.get_mgmt_svc_client(StorageManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-06-01') + return self._storage_client + + @property + def storage_models(self): + return StorageManagementClient.models("2021-06-01") + + @property + def authorization_client(self): + self.log('Getting authorization client...') + if not self._authorization_client: + self._authorization_client = self.get_mgmt_svc_client(AuthorizationManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2020-04-01-preview') + return self._authorization_client + + @property + def 
authorization_models(self): + return AuthorizationManagementClient.models('2020-04-01-preview') + + @property + def subscription_client(self): + self.log('Getting subscription client...') + if not self._subscription_client: + self._subscription_client = self.get_mgmt_svc_client(SubscriptionClient, + base_url=self._cloud_environment.endpoints.resource_manager, + suppress_subscription_id=True, + is_track2=True, + api_version='2019-11-01') + return self._subscription_client + + @property + def subscription_models(self): + return SubscriptionClient.models("2019-11-01") + + @property + def management_groups_client(self): + self.log('Getting Management Groups client...') + if not self._management_group_client: + self._management_group_client = self.get_mgmt_svc_client(ManagementGroupsClient, + base_url=self._cloud_environment.endpoints.resource_manager, + suppress_subscription_id=True, + is_track2=True, + api_version='2020-05-01') + return self._management_group_client + + @property + def network_client(self): + self.log('Getting network client') + if not self._network_client: + self._network_client = self.get_mgmt_svc_client(NetworkManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-03-01') + return self._network_client + + @property + def network_models(self): + self.log("Getting network models...") + return NetworkManagementClient.models("2021-03-01") + + @property + def rm_client(self): + self.log('Getting resource manager client') + if not self._resource_client: + self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2019-10-01') + return self._resource_client + + @property + def rm_models(self): + self.log("Getting resource manager models") + return ResourceManagementClient.models("2019-10-01") + + @property + def image_client(self): + self.log('Getting compute image client') + if not 
self._image_client: + self._image_client = self.get_mgmt_svc_client(ComputeManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-04-01') + return self._image_client + + @property + def image_models(self): + self.log("Getting compute image models") + return ComputeManagementClient.models("2021-04-01") + + @property + def compute_client(self): + self.log('Getting compute client') + if not self._compute_client: + self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-04-01') + return self._compute_client + + @property + def compute_models(self): + self.log("Getting compute models") + return ComputeManagementClient.models("2021-04-01") + + @property + def dns_client(self): + self.log('Getting dns client') + if not self._dns_client: + self._dns_client = self.get_mgmt_svc_client(DnsManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2018-05-01') + return self._dns_client + + @property + def dns_models(self): + self.log("Getting dns models...") + return DnsManagementClient.models('2018-05-01') + + @property + def private_dns_client(self): + self.log('Getting private dns client') + if not self._private_dns_client: + self._private_dns_client = self.get_mgmt_svc_client( + PrivateDnsManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._private_dns_client + + @property + def private_dns_models(self): + self.log('Getting private dns models') + return PrivateDnsModels + + @property + def web_client(self): + self.log('Getting web client') + if not self._web_client: + self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-03-01') + return self._web_client + + 
@property + def containerservice_client(self): + self.log('Getting container service client') + if not self._containerservice_client: + self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2017-07-01') + return self._containerservice_client + + @property + def managedcluster_models(self): + self.log("Getting container service models") + return ContainerServiceClient.models('2022-02-01') + + @property + def managedcluster_client(self): + self.log('Getting container service client') + if not self._managedcluster_client: + self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2022-02-01') + return self._managedcluster_client + + @property + def sql_client(self): + self.log('Getting SQL client') + if not self._sql_client: + self._sql_client = self.get_mgmt_svc_client(SqlManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True) + return self._sql_client + + @property + def postgresql_client(self): + self.log('Getting PostgreSQL client') + if not self._postgresql_client: + self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._postgresql_client + + @property + def mysql_client(self): + self.log('Getting MySQL client') + if not self._mysql_client: + self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._mysql_client + + @property + def mariadb_client(self): + self.log('Getting MariaDB client') + if not self._mariadb_client: + self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient, + is_track2=True, + 
base_url=self._cloud_environment.endpoints.resource_manager) + return self._mariadb_client + + @property + def containerregistry_client(self): + self.log('Getting container registry mgmt client') + if not self._containerregistry_client: + self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-09-01') + + return self._containerregistry_client + + @property + def containerinstance_client(self): + self.log('Getting container instance mgmt client') + if not self._containerinstance_client: + self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2018-06-01') + + return self._containerinstance_client + + @property + def marketplace_client(self): + self.log('Getting marketplace agreement client') + if not self._marketplace_client: + self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._marketplace_client + + @property + def traffic_manager_management_client(self): + self.log('Getting traffic manager client') + if not self._traffic_manager_management_client: + self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._traffic_manager_management_client + + @property + def monitor_autoscale_settings_client(self): + self.log('Getting monitor client for autoscale_settings') + if not self._monitor_autoscale_settings_client: + self._monitor_autoscale_settings_client = self.get_mgmt_svc_client(MonitorManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version="2015-04-01", + is_track2=True) + return 
self._monitor_autoscale_settings_client + + @property + def monitor_log_profiles_client(self): + self.log('Getting monitor client for log_profiles') + if not self._monitor_log_profiles_client: + self._monitor_log_profiles_client = self.get_mgmt_svc_client(MonitorManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version="2016-03-01", + is_track2=True) + return self._monitor_log_profiles_client + + @property + def monitor_diagnostic_settings_client(self): + self.log('Getting monitor client for diagnostic_settings') + if not self._monitor_diagnostic_settings_client: + self._monitor_diagnostic_settings_client = self.get_mgmt_svc_client(MonitorManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version="2021-05-01-preview", + is_track2=True) + return self._monitor_diagnostic_settings_client + + @property + def log_analytics_client(self): + self.log('Getting log analytics client') + if not self._log_analytics_client: + self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._log_analytics_client + + @property + def log_analytics_models(self): + self.log('Getting log analytics models') + return LogAnalyticsModels + + @property + def servicebus_client(self): + self.log('Getting servicebus client') + if not self._servicebus_client: + self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient, + is_track2=True, + api_version="2021-06-01-preview", + base_url=self._cloud_environment.endpoints.resource_manager) + return self._servicebus_client + + @property + def servicebus_models(self): + return ServiceBusManagementClient.models("2021-06-01-preview") + + @property + def automation_client(self): + self.log('Getting automation client') + if not self._automation_client: + self._automation_client = self.get_mgmt_svc_client(AutomationClient, + 
base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True) + return self._automation_client + + @property + def automation_models(self): + return AutomationModel + + @property + def IoThub_client(self): + self.log('Getting iothub client') + if not self._IoThub_client: + self._IoThub_client = self.get_mgmt_svc_client(IotHubClient, + is_track2=True, + api_version='2018-04-01', + base_url=self._cloud_environment.endpoints.resource_manager) + return self._IoThub_client + + @property + def IoThub_models(self): + return IoTHubModels + + @property + def lock_client(self): + self.log('Getting lock client') + if not self._lock_client: + self._lock_client = self.get_mgmt_svc_client(ManagementLockClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2016-09-01') + return self._lock_client + + @property + def lock_models(self): + self.log("Getting lock models") + return ManagementLockClient.models('2016-09-01') + + @property + def recovery_services_backup_client(self): + self.log('Getting recovery services backup client') + if not self._recovery_services_backup_client: + self._recovery_services_backup_client = self.get_mgmt_svc_client(RecoveryServicesBackupClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._recovery_services_backup_client + + @property + def recovery_services_backup_models(self): + return RecoveryServicesBackupModels + + @property + def search_client(self): + self.log('Getting search client...') + if not self._search_client: + self._search_client = self.get_mgmt_svc_client(SearchManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2020-08-01') + return self._search_client + + @property + def datalake_store_client(self): + self.log('Getting datalake store client...') + if not self._datalake_store_client: + self._datalake_store_client = 
self.get_mgmt_svc_client(DataLakeStoreAccountManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2016-11-01') + return self._datalake_store_client + + @property + def datalake_store_models(self): + return DataLakeStoreAccountModel + + @property + def notification_hub_client(self): + self.log('Getting notification hub client') + if not self._notification_hub_client: + self._notification_hub_client = self.get_mgmt_svc_client( + NotificationHubsManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2016-03-01') + return self._notification_hub_client + + @property + def event_hub_client(self): + self.log('Getting event hub client') + if not self._event_hub_client: + self._event_hub_client = self.get_mgmt_svc_client( + EventHubManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2021-11-01') + return self._event_hub_client + + @property + def datafactory_client(self): + self.log('Getting datafactory client...') + if not self._datafactory_client: + self._datafactory_client = self.get_mgmt_svc_client(DataFactoryManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + return self._datafactory_client + + @property + def datafactory_model(self): + return DataFactoryModel + + +class AzureSASAuthentication(Authentication): + """Simple SAS Authentication. 
+ An implementation of Authentication in + https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py + + :param str token: SAS token + """ + def __init__(self, token): + self.token = token + + def signed_session(self): + session = super(AzureSASAuthentication, self).signed_session() + session.headers['Authorization'] = self.token + return session + + +class AzureRMAuthException(Exception): + pass + + +class AzureRMAuth(object): + _cloud_environment = None + _adfs_authority_url = None + + def __init__(self, auth_source=None, profile=None, subscription_id=None, client_id=None, secret=None, + tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate', + api_profile='latest', adfs_authority_url=None, fail_impl=None, is_ad_resource=False, + x509_certificate_path=None, thumbprint=None, **kwargs): + + if fail_impl: + self._fail_impl = fail_impl + else: + self._fail_impl = self._default_fail_impl + self.is_ad_resource = is_ad_resource + + # authenticate + self.credentials = self._get_credentials( + auth_source=auth_source, + profile=profile, + subscription_id=subscription_id, + client_id=client_id, + secret=secret, + tenant=tenant, + ad_user=ad_user, + password=password, + cloud_environment=cloud_environment, + cert_validation_mode=cert_validation_mode, + api_profile=api_profile, + adfs_authority_url=adfs_authority_url, + x509_certificate_path=x509_certificate_path, + thumbprint=thumbprint) + + if not self.credentials: + if HAS_AZURE_CLI_CORE: + self.fail("Failed to get credentials. Either pass as parameters, set environment variables, " + "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).") + else: + self.fail("Failed to get credentials. 
Either pass as parameters, set environment variables, " + "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).") + + # cert validation mode precedence: module-arg, credential profile, env, "validate" + self._cert_validation_mode = cert_validation_mode or \ + self.credentials.get('cert_validation_mode') or \ + self._get_env('cert_validation_mode') or \ + 'validate' + + if self._cert_validation_mode not in ['validate', 'ignore']: + self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode)) + + # if cloud_environment specified, look up/build Cloud object + raw_cloud_env = self.credentials.get('cloud_environment') + if self.credentials.get('credentials') is not None and raw_cloud_env is not None: + self._cloud_environment = raw_cloud_env + elif not raw_cloud_env: + self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default + else: + # try to look up "well-known" values via the name attribute on azure_cloud members + all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] + matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] + if len(matched_clouds) == 1: + self._cloud_environment = matched_clouds[0] + elif len(matched_clouds) > 1: + self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) + else: + if not urlparse.urlparse(raw_cloud_env).scheme: + self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) + try: + self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) + except Exception as e: + self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc()) + + if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None: + self.fail("Credentials did not include a subscription_id value.") + 
self.log("setting subscription_id") + self.subscription_id = self.credentials['subscription_id'] + + # get authentication authority + # for adfs, user could pass in authority or not. + # for others, use default authority from cloud environment + if self.credentials.get('adfs_authority_url') is None: + self._adfs_authority_url = self._cloud_environment.endpoints.active_directory + else: + self._adfs_authority_url = self.credentials.get('adfs_authority_url') + + if self.credentials.get('auth_source') == 'msi': + # MSI Credentials + self.azure_credentials = self.credentials['credentials'] + self.azure_credential_track2 = self.credentials['credential'] + elif self.credentials.get('credentials') is not None: + # AzureCLI credentials + self.azure_credentials = self.credentials['credentials'] + self.azure_credential_track2 = self.credentials['credentials'] + elif self.credentials.get('client_id') is not None and \ + self.credentials.get('secret') is not None and \ + self.credentials.get('tenant') is not None: + + graph_resource = self._cloud_environment.endpoints.active_directory_graph_resource_id + rm_resource = self._cloud_environment.endpoints.resource_manager + self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], + secret=self.credentials['secret'], + tenant=self.credentials['tenant'], + cloud_environment=self._cloud_environment, + resource=graph_resource if self.is_ad_resource else rm_resource, + verify=self._cert_validation_mode == 'validate') + self.azure_credential_track2 = client_secret.ClientSecretCredential(client_id=self.credentials['client_id'], + client_secret=self.credentials['secret'], + tenant_id=self.credentials['tenant']) + + elif self.credentials.get('client_id') is not None and \ + self.credentials.get('tenant') is not None and \ + self.credentials.get('thumbprint') is not None and \ + self.credentials.get('x509_certificate_path') is not None: + + self.azure_credentials = 
self.acquire_token_with_client_certificate( + self._adfs_authority_url, + self._cloud_environment.endpoints.active_directory_resource_id, + self.credentials['x509_certificate_path'], + self.credentials['thumbprint'], + self.credentials['client_id'], + self.credentials['tenant']) + + self.azure_credential_track2 = certificate.CertificateCredential(tenant_id=self.credentials['tenant'], + client_id=self.credentials['client_id'], + certificate_path=self.credentials['x509_certificate_path']) + + elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: + tenant = self.credentials.get('tenant') + if not tenant: + tenant = 'common' # SDK default + + self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], + self.credentials['password'], + tenant=tenant, + cloud_environment=self._cloud_environment, + verify=self._cert_validation_mode == 'validate') + + client_id = self.credentials.get('client_id', '04b07795-8ddb-461a-bbee-02f9e1bf7b46') + + self.azure_credential_track2 = user_password.UsernamePasswordCredential(username=self.credentials['ad_user'], + password=self.credentials['password'], + tenant_id=self.credentials.get('tenant', 'organizations'), + client_id=client_id) + + elif self.credentials.get('ad_user') is not None and \ + self.credentials.get('password') is not None and \ + self.credentials.get('client_id') is not None and \ + self.credentials.get('tenant') is not None: + + self.azure_credentials = self.acquire_token_with_username_password( + self._adfs_authority_url, + self._cloud_environment.endpoints.active_directory_resource_id, + self.credentials['ad_user'], + self.credentials['password'], + self.credentials['client_id'], + self.credentials['tenant']) + + else: + self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" + "Credentials must include client_id, secret and tenant or ad_user and password, or " + "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or " + "be logged in using AzureCLI.") + + def fail(self, msg, exception=None, **kwargs): + self._fail_impl(msg) + + def _default_fail_impl(self, msg, exception=None, **kwargs): + raise AzureRMAuthException(msg) + + def _get_env(self, module_key, default=None): + "Read envvar matching module parameter" + return os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING[module_key], default) + + def _get_profile(self, profile="default"): + path = expanduser("~/.azure/credentials") + try: + config = configparser.ConfigParser() + config.read(path) + except Exception as exc: + self.fail("Failed to access {0}. Check that the file exists and you have read " + "access. {1}".format(path, str(exc))) + credentials = dict() + for key in AZURE_CREDENTIAL_ENV_MAPPING: + try: + credentials[key] = config.get(profile, key, raw=True) + except Exception: + pass + + if credentials.get('subscription_id'): + return credentials + + return None + + def _get_msi_credentials(self, subscription_id=None, client_id=None, _cloud_environment=None, **kwargs): + # Get object `cloud_environment` from string `_cloud_environment` + cloud_environment = None + if not _cloud_environment: + cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD + else: + # try to look up "well-known" values via the name attribute on azure_cloud members + all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] + matched_clouds = [x for x in all_clouds if x.name == _cloud_environment] + if len(matched_clouds) == 1: + cloud_environment = matched_clouds[0] + elif len(matched_clouds) > 1: + self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(_cloud_environment)) + else: + if not urlparse.urlparse(_cloud_environment).scheme: + self.fail("cloud_environment must be an 
endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) + try: + cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(_cloud_environment) + except Exception as exc: + self.fail("cloud_environment {0} could not be resolved: {1}".format(_cloud_environment, str(exc)), exception=traceback.format_exc()) + + credentials = MSIAuthentication(client_id=client_id, cloud_environment=cloud_environment) + credential = MSIAuthenticationWrapper(client_id=client_id, cloud_environment=cloud_environment) + subscription_id = subscription_id or self._get_env('subscription_id') + if not subscription_id: + try: + # use the first subscription of the MSI + subscription_client = SubscriptionClient(credentials) + subscription = next(subscription_client.subscriptions.list()) + subscription_id = str(subscription.subscription_id) + except Exception as exc: + self.fail("Failed to get MSI token: {0}. " + "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc))) + return { + 'credentials': credentials, + 'credential': credential, + 'subscription_id': subscription_id, + 'cloud_environment': cloud_environment, + 'auth_source': 'msi' + } + + def _get_azure_cli_credentials(self, subscription_id=None, resource=None): + if self.is_ad_resource: + resource = 'https://graph.windows.net/' + subscription_id = subscription_id or self._get_env('subscription_id') + try: + profile = get_cli_profile() + except Exception as exc: + self.fail("Failed to load CLI profile {0}.".format(str(exc))) + + credentials, subscription_id, tenant = profile.get_login_credentials( + subscription_id=subscription_id, resource=resource) + cloud_environment = get_cli_active_cloud() + + cli_credentials = { + 'credentials': credentials, + 'subscription_id': subscription_id, + 'cloud_environment': cloud_environment + } + return cli_credentials + + def _get_env_credentials(self): + env_credentials = dict() + for attribute, env_variable in 
AZURE_CREDENTIAL_ENV_MAPPING.items(): + env_credentials[attribute] = os.environ.get(env_variable, None) + + if env_credentials['profile']: + credentials = self._get_profile(env_credentials['profile']) + return credentials + + if env_credentials.get('subscription_id') is not None: + return env_credentials + + return None + + def _get_credentials(self, auth_source=None, **params): + # Get authentication credentials. + self.log('Getting credentials') + + arg_credentials = dict() + for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): + arg_credentials[attribute] = params.get(attribute, None) + + if auth_source == 'msi': + self.log('Retrieving credentials from MSI') + return self._get_msi_credentials(subscription_id=params.get('subscription_id'), client_id=params.get('client_id'), + _cloud_environment=params.get('cloud_environment')) + + if auth_source == 'cli': + if not HAS_AZURE_CLI_CORE: + self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'), + exception=HAS_AZURE_CLI_CORE_EXC) + try: + self.log('Retrieving credentials from Azure CLI profile') + cli_credentials = self._get_azure_cli_credentials(subscription_id=params.get('subscription_id')) + return cli_credentials + except CLIError as err: + self.fail("Azure CLI profile cannot be loaded - {0}".format(err)) + + if auth_source == 'env': + self.log('Retrieving credentials from environment') + env_credentials = self._get_env_credentials() + return env_credentials + + if auth_source == 'credential_file': + self.log("Retrieving credentials from credential file") + profile = params.get('profile') or 'default' + default_credentials = self._get_profile(profile) + return default_credentials + + # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials -> azure cli + # try module params + if arg_credentials['profile'] is not None: + self.log('Retrieving credentials with profile parameter.') + credentials = 
self._get_profile(arg_credentials['profile']) + return credentials + + if arg_credentials['client_id'] or arg_credentials['ad_user']: + self.log('Received credentials from parameters.') + return arg_credentials + + # try environment + env_credentials = self._get_env_credentials() + if env_credentials: + self.log('Received credentials from env.') + return env_credentials + + # try default profile from ~./azure/credentials + default_credentials = self._get_profile() + if default_credentials: + self.log('Retrieved default profile credentials from ~/.azure/credentials.') + return default_credentials + + try: + if HAS_AZURE_CLI_CORE: + self.log('Retrieving credentials from AzureCLI profile') + cli_credentials = self._get_azure_cli_credentials(subscription_id=params.get('subscription_id')) + return cli_credentials + except CLIError as ce: + self.log('Error getting AzureCLI profile credentials - {0}'.format(ce)) + + return None + + def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant): + authority_uri = authority + + if tenant is not None: + authority_uri = authority + '/' + tenant + + context = AuthenticationContext(authority_uri) + token_response = context.acquire_token_with_username_password(resource, username, password, client_id) + + return AADTokenCredentials(token_response) + + def acquire_token_with_client_certificate(self, authority, resource, x509_certificate_path, thumbprint, client_id, tenant): + authority_uri = authority + + if tenant is not None: + authority_uri = authority + '/' + tenant + + context = AuthenticationContext(authority_uri) + x509_certificate = None + with open(x509_certificate_path, 'rb') as pem_file: + x509_certificate = pem_file.read() + token_response = context.acquire_token_with_client_certificate(resource, client_id, x509_certificate, thumbprint) + + return AADTokenCredentials(token_response) + + def log(self, msg, pretty_print=False): + pass + # Use only during module development + # 
if self.debug: + # log_file = open('azure_rm.log', 'a') + # if pretty_print: + # log_file.write(json.dumps(msg, indent=4, sort_keys=True)) + # else: + # log_file.write(msg + u'\n') diff --git a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_ext.py b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_ext.py new file mode 100644 index 000000000..df477c89b --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_ext.py @@ -0,0 +1,215 @@ +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +import re +from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel +from ansible.module_utils.six import string_types + + +class AzureRMModuleBaseExt(AzureRMModuleBase): + + def inflate_parameters(self, spec, body, level): + if isinstance(body, list): + for item in body: + self.inflate_parameters(spec, item, level) + return + for name in spec.keys(): + # first check if option was passed + param = body.get(name) + if param is None: + if spec[name].get('purgeIfNone', False): + body.pop(name, None) + continue + # check if pattern needs to be used + pattern = spec[name].get('pattern', None) + if pattern: + if pattern == 'camelize': + param = _snake_to_camel(param, True) + elif isinstance(pattern, list): + normalized = None + for p in pattern: + normalized = self.normalize_resource_id(param, p) + body[name] = normalized + if normalized is not None: + break + else: + param = self.normalize_resource_id(param, pattern) + body[name] = param + disposition = spec[name].get('disposition', '*') + if level == 0 and not disposition.startswith('/'): + continue + if disposition == 
'/': + disposition = '/*' + parts = disposition.split('/') + if parts[0] == '': + # should fail if level is > 0? + parts.pop(0) + target_dict = body + elem = body.pop(name) + while len(parts) > 1: + target_dict = target_dict.setdefault(parts.pop(0), {}) + targetName = parts[0] if parts[0] != '*' else name + target_dict[targetName] = elem + if spec[name].get('options'): + self.inflate_parameters(spec[name].get('options'), target_dict[targetName], level + 1) + + def normalize_resource_id(self, value, pattern): + ''' + Return a proper resource id string.. + + :param resource_id: It could be a resource name, resource id or dict containing parts from the pattern. + :param pattern: pattern of resource is, just like in Azure Swagger + ''' + value_dict = {} + if isinstance(value, string_types): + value_parts = value.split('/') + if len(value_parts) == 1: + value_dict['name'] = value + else: + pattern_parts = pattern.split('/') + if len(value_parts) != len(pattern_parts): + return None + for i in range(len(value_parts)): + if pattern_parts[i].startswith('{'): + value_dict[pattern_parts[i][1:-1]] = value_parts[i] + elif value_parts[i].lower() != pattern_parts[i].lower(): + return None + elif isinstance(value, dict): + value_dict = value + else: + return None + if not value_dict.get('subscription_id'): + value_dict['subscription_id'] = self.subscription_id + if not value_dict.get('resource_group'): + value_dict['resource_group'] = self.resource_group + + # check if any extra values passed + for k in value_dict: + if not ('{' + k + '}') in pattern: + return None + # format url + return pattern.format(**value_dict) + + def idempotency_check(self, old_params, new_params): + ''' + Return True if something changed. Function will use fields from module_arg_spec to perform dependency checks. + :param old_params: old parameters dictionary, body from Get request. + :param new_params: new parameters dictionary, unpacked module parameters. 
+ ''' + modifiers = {} + result = {} + self.create_compare_modifiers(self.module.argument_spec, '', modifiers) + self.results['modifiers'] = modifiers + return self.default_compare(modifiers, new_params, old_params, '', self.results) + + def create_compare_modifiers(self, arg_spec, path, result): + for k in arg_spec.keys(): + o = arg_spec[k] + updatable = o.get('updatable', True) + comparison = o.get('comparison', 'default') + disposition = o.get('disposition', '*') + if disposition == '/': + disposition = '/*' + p = (path + + ('/' if len(path) > 0 else '') + + disposition.replace('*', k) + + ('/*' if o['type'] == 'list' else '')) + if comparison != 'default' or not updatable: + result[p] = {'updatable': updatable, 'comparison': comparison} + if o.get('options'): + self.create_compare_modifiers(o.get('options'), p, result) + + def default_compare(self, modifiers, new, old, path, result): + ''' + Default dictionary comparison. + This function will work well with most of the Azure resources. + It correctly handles "location" comparison. + + Value handling: + - if "new" value is None, it will be taken from "old" dictionary if "incremental_update" + is enabled. + List handling: + - if list contains "name" field it will be sorted by "name" before comparison is done. + - if module has "incremental_update" set, items missing in the new list will be copied + from the old list + + Warnings: + If field is marked as non-updatable, appropriate warning will be printed out and + "new" structure will be updated to old value. + + :modifiers: Optional dictionary of modifiers, where key is the path and value is dict of modifiers + :param new: New version + :param old: Old version + + Returns True if no difference between structures has been detected. + Returns False if difference was detected. 
+ ''' + if new is None: + return True + elif isinstance(new, dict): + comparison_result = True + if not isinstance(old, dict): + result['compare'].append('changed [' + path + '] old dict is null') + comparison_result = False + else: + for k in set(new.keys()) | set(old.keys()): + new_item = new.get(k, None) + old_item = old.get(k, None) + if new_item is None: + if isinstance(old_item, dict): + new[k] = old_item + result['compare'].append('new item was empty, using old [' + path + '][ ' + k + ' ]') + elif not self.default_compare(modifiers, new_item, old_item, path + '/' + k, result): + comparison_result = False + return comparison_result + elif isinstance(new, list): + comparison_result = True + if not isinstance(old, list) or len(new) != len(old): + result['compare'].append('changed [' + path + '] length is different or old value is null') + comparison_result = False + elif len(old) > 0: + if isinstance(old[0], dict): + key = None + if 'id' in old[0] and 'id' in new[0]: + key = 'id' + elif 'name' in old[0] and 'name' in new[0]: + key = 'name' + else: + key = next(iter(old[0])) + new = sorted(new, key=lambda x: x.get(key, None)) + old = sorted(old, key=lambda x: x.get(key, None)) + else: + new = sorted(new) + old = sorted(old) + for i in range(len(new)): + if not self.default_compare(modifiers, new[i], old[i], path + '/*', result): + comparison_result = False + return comparison_result + else: + updatable = modifiers.get(path, {}).get('updatable', True) + comparison = modifiers.get(path, {}).get('comparison', 'default') + if comparison == 'ignore': + return True + elif comparison == 'default' or comparison == 'sensitive': + if isinstance(old, string_types) and isinstance(new, string_types): + new = new.lower() + old = old.lower() + elif comparison == 'location': + if isinstance(old, string_types) and isinstance(new, string_types): + new = new.replace(' ', '').lower() + old = old.replace(' ', '').lower() + if str(new) != str(old): + result['compare'].append('changed 
[' + path + '] ' + str(new) + ' != ' + str(old) + ' - ' + str(comparison)) + if updatable: + return False + else: + self.module.warn("property '" + path + "' cannot be updated (" + str(old) + "->" + str(new) + ")") + return True + else: + return True diff --git a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_rest.py b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_rest.py new file mode 100644 index 000000000..30908be53 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common_rest.py @@ -0,0 +1,104 @@ +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +try: + from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION +except Exception: + ANSIBLE_VERSION = 'unknown' + +try: + from msrestazure.azure_exceptions import CloudError + from msrestazure.azure_configuration import AzureConfiguration + from msrest.service_client import ServiceClient + from msrest.pipeline import ClientRawResponse + from msrest.polling import LROPoller + from msrestazure.polling.arm_polling import ARMPolling + import uuid + import json +except ImportError: + # This is handled in azure_rm_common + AzureConfiguration = object + +ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION) + + +class GenericRestClientConfiguration(AzureConfiguration): + + def __init__(self, credentials, subscription_id, base_url=None): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if not base_url: + base_url = 'https://management.azure.com' + + super(GenericRestClientConfiguration, self).__init__(base_url) + + self.add_user_agent(ANSIBLE_USER_AGENT) + + self.credentials = 
credentials + self.subscription_id = subscription_id + + +class GenericRestClient(object): + + def __init__(self, credentials, subscription_id, base_url=None): + self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url) + self._client = ServiceClient(self.config.credentials, self.config) + self.models = None + + def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval): + # Construct and send request + operation_config = {} + + request = None + + if header_parameters is None: + header_parameters = {} + + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + + if method == 'GET': + request = self._client.get(url, query_parameters) + elif method == 'PUT': + request = self._client.put(url, query_parameters) + elif method == 'POST': + request = self._client.post(url, query_parameters) + elif method == 'HEAD': + request = self._client.head(url, query_parameters) + elif method == 'PATCH': + request = self._client.patch(url, query_parameters) + elif method == 'DELETE': + request = self._client.delete(url, query_parameters) + elif method == 'MERGE': + request = self._client.merge(url, query_parameters) + + response = self._client.send(request, header_parameters, body, **operation_config) + + if response.status_code not in expected_status_codes: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + elif response.status_code == 202 and polling_timeout > 0: + def get_long_running_output(response): + return response + poller = LROPoller(self._client, + ClientRawResponse(None, response), + get_long_running_output, + ARMPolling(polling_interval, **operation_config)) + response = self.get_poller_result(poller, polling_timeout) + + return response + + def get_poller_result(self, poller, timeout): + try: + poller.wait(timeout=timeout) + return poller.result() + except Exception as exc: + raise diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_account_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_account_info.py new file mode 100644 index 000000000..90e2799fe --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_account_info.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 Mandar Kulkarni, < @mandar242 > +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_account_info + +version_added: "1.14.0" + +short_description: Get Azure Account facts (output of az account show) + +description: + - Get facts for current logged in user. + - Output equivalent of `az account show` command. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Mandar Kulkarni (@mandar242) +''' + +EXAMPLES = ''' +- name: Get facts for current logged in user + azure.azcollection.azure_rm_account_info: +''' + +RETURN = ''' +account_info: + description: + - Facts for current logged in user, equivalent to `az account show`. + returned: always + type: dict + contains: + environmentName: + description: For cloud environments other than the US public cloud, the environment name. + returned: always + type: str + sample: AzureCloud + homeTenantId: + description: Subscription tenant id. + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + id: + description: Subscription id. + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + managedByTenants: + description: An array containing the tenants managing the subscription. 
+ returned: always + type: list + elements: dict + contains: + tenantId: + description: Subscription tenant id + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + name: + description: The subscription display name. + returned: always + type: str + sample: "Pay-As-You-Go" + state: + description: + - The subscription state. + - Possible values include "Enabled", "Warned", "PastDue", "Disabled", "Deleted". + returned: always + type: str + sample: "Enabled" + tenant_id: + description: Subscription tenant id + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + user: + description: An dict containing the current user name and type. + returned: always + type: dict + elements: str + contains: + name: + description: The principal name of the active directory user. + returned: always + type: str + sample: "sample-user@sample-tenant.onmicrosoft.com" + type: + description: Active Directory user type. + returned: always + type: str + sample: "User" +''' + + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac import GraphRbacManagementClient + from azure.graphrbac.models import GraphErrorException +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMAuth + + +class AzureRMAccountInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + ) + + self.results = dict( + changed=False, + account_info=[] + ) + + # Different return info is gathered using 2 different clients + # 1. All except "user" section of the return value uses azure.mgmt.subsctiption.operations.subscriptionoperations + # 2. 
"user" section of the return value uses different client (graphrbac) + + super(AzureRMAccountInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + is_ad_resource=False) + + def exec_module(self, **kwargs): + + result = [] + result = self.list_items() + + self.results['account_info'] = result + return self.results + + def list_items(self): + + results = {} + + # Get + # "homeTenantId": "xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx", + # "id": "xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx", + # "isDefault": true, <- WIP on getting this param + # "managedByTenants": [ + # { + # "tenantId": "64xxxxxx-xxxx-49fc-xxxx-ebxxxxxxxxxx" + # }, + # { + # "tenantId": "2axxxxxx-xxxx-xxxx-a339-ebxxxxxxxxxx" + # }, + # { + # "tenantId": "xxxxxxxx-xxxx-4e68-xxxx-ebxxxxxxxxxx" + # } + # ], + # "name": "Pay-As-You-Go", + # "state": "Enabled", + # "tenantId": "xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx", + + # Makes use of azure.mgmt.subsctiption.operations.subscriptionoperations + # https://docs.microsoft.com/en-us/python/api/azure-mgmt-subscription/azure.mgmt.subscription.operations.subscriptionsoperations?view=azure-python#methods + + try: + subscription_list_response = list(self.subscription_client.subscriptions.list()) + except CloudError as exc: + self.fail("Failed to list all subscriptions - {0}".format(str(exc))) + + results['id'] = subscription_list_response[0].subscription_id + results['tenantId'] = subscription_list_response[0].tenant_id + results['homeTenantId'] = subscription_list_response[0].tenant_id + results['name'] = subscription_list_response[0].display_name + results['state'] = subscription_list_response[0].state + results['managedByTenants'] = self.get_managed_by_tenants_list(subscription_list_response[0].managed_by_tenants) + results['environmentName'] = self.azure_auth._cloud_environment.name + results['user'] = self.get_aduser_info(subscription_list_response[0].tenant_id) + + return results + + def get_managed_by_tenants_list(self, 
object_list): + + return [dict(tenantId=item.tenant_id) for item in object_list] + + def get_aduser_info(self, tenant_id): + + # Create GraphRbacManagementClient for getting + # "user": { + # "name": "mandar123456@abcdefg.onmicrosoft.com", + # "type": "user"self. + # } + + # Makes use of azure graphrbac + # https://docs.microsoft.com/en-us/python/api/overview/azure/microsoft-graph?view=azure-python#client-library + + user = {} + self.azure_auth_graphrbac = AzureRMAuth(is_ad_resource=True) + cred = self.azure_auth_graphrbac.azure_credentials + base_url = self.azure_auth_graphrbac._cloud_environment.endpoints.active_directory_graph_resource_id + client = GraphRbacManagementClient(cred, tenant_id, base_url) + + try: + user_info = client.signed_in_user.get() + user['name'] = user_info.user_principal_name + user['type'] = user_info.object_type + + except GraphErrorException as e: + self.fail("failed to get ad user info {0}".format(str(e))) + + return user + + +def main(): + AzureRMAccountInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py new file mode 100644 index 000000000..f65f8b191 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py @@ -0,0 +1,671 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Guopeng Lin, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_adapplication + +version_added: "1.6.0" + +short_description: Manage Azure Active Directory application + +description: + - Manage Azure Active Directory application. + +options: + tenant: + description: + - The tenant ID. + type: str + required: True + + app_id: + description: + - Application ID. 
+ type: str + + display_name: + description: + - The display name of the application. + type: str + + app_roles: + description: + - Declare the roles you want to associate with your application. + type: list + elements: dict + suboptions: + allowed_member_types: + description: + - Specifies whether this app role can be assigned to users and groups I(allowed_member_types=User). + - To other application's I(allowed_member_types=Application). + - Or both C(User) and C(Appplication). + type: list + elements: str + required: True + description: + description: + - The description for the app role. + - This is displayed when the app role is being assigned. + - if the app role functions as an application permission, during consent experiences. + type: str + display_name: + description: + - Display name for the permission that appears in the app role assignment and consent experiences. + type: str + is_enabled: + description: + - When creating or updating an app role, this must be set to true (which is the default). + - To delete a role, this must first be set to false. + - At that point, in a subsequent call, this role may be removed. + type: bool + value: + description: + - Specifies the value to include in the roles claim in ID tokens and access tokens authenticating an assigned user or service principal. + - Must not exceed 120 characters in length. + - Allowed characters include ! # $ % & ' ( ) * + , - . / : ; < = > ? @ [ ] ^ + _ ` { | } ~, and characters in the ranges 0-9, A-Z and a-z. + - Any other character, including the space character, are not allowed. + type: str + + available_to_other_tenants: + description: + - The application can be used from any Azure AD tenants. + type: bool + + credential_description: + description: + - The description of the password. + type: str + + end_date: + description: + - Date or datetime after which credentials expire(e.g. '2017-12-31'). + - Default value is one year after current time. 
+ type: str + + homepage: + description: + - The url where users can sign in and use your app. + type: str + + identifier_uris: + description: + - Space-separated unique URIs that Azure AD can use for this app. + elements: str + type: list + + key_type: + description: + - The type of the key credentials associated with the application. + type: str + default: AsymmetricX509Cert + choices: + - AsymmetricX509Cert + - Password + - Symmetric + + key_usage: + description: + - The usage of the key credentials associated with the application. + type: str + default: Verify + choices: + - Sign + - Verify + + key_value: + description: + - The value for the key credentials associated with the application. + type: str + + native_app: + description: + - An application which can be installed on a user's device or computer. + type: bool + + oauth2_allow_implicit_flow: + description: + - Whether to allow implicit grant flow for OAuth2. + type: bool + + optional_claims: + description: + - Declare the optional claims for the application. + type: list + elements: dict + suboptions: + name: + description: + - The name of the optional claim. + type: str + required: True + source: + description: + - The source (directory object) of the claim. + - There are predefined claims and user-defined claims from extension properties. + - If the source value is null, the claim is a predefined optional claim. + - If the source value is user, the value in the name property is the extension property from the user object. + type: str + essential: + description: + - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience + for the specific task requested by the end user. + - The default value is false. + default: false + type: bool + additional_properties: + description: + - Additional properties of the claim. + - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property. 
+ type: str + password: + description: + - App password, aka 'client secret'. + type: str + + reply_urls: + description: + - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request. + - The value does not need to be a physical endpoint, but must be a valid URI. + type: list + elements: str + + required_resource_accesses: + description: + - Resource scopes and roles the application requires access to. + - Should be in manifest json format. + type: list + elements: dict + suboptions: + resource_app_id: + description: + - The unique identifier for the resource that the application requires access to. + - This should be equal to the appId declared on the target resource application. + type: str + resource_access: + description: + - The description of the app role. + type: list + elements: dict + suboptions: + id: + description: + - The unique identifier for one of the oauth2PermissionScopes or appRole instances that the resource application exposes. + type: str + type: + description: + - Specifies whether the id property references an oauth2PermissionScopes or an appRole. + - Possible values are Scope or Role. + type: str + + start_date: + description: + - Date or datetime at which credentials become valid, such as '2017-01-01'. + - Default value is current time. + type: str + allow_guests_sign_in: + description: + - A property on the application to indicate if the application accepts other IDPs or not or partially accepts. + type: bool + state: + description: + - Assert the state of Active Dirctory service principal. + - Use C(present) to create or update a Password and use C(absent) to delete. 
+ default: present + choices: + - absent + - present + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + guopeng_lin (@guopenglin) + haiyuan_zhang (@haiyuazhang) + Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: Create ad application + azure_rm_adapplication: + tenant: "{{ tenant_id }}" + display_name: "{{ display_name }}" + + - name: Create application with more parameter + azure_rm_adapplication: + tenant: "{{ tenant_id }}" + display_name: "{{ display_name }}" + available_to_other_tenants: False + credential_description: "for test" + end_date: 2021-10-01 + start_date: 2021-05-18 + identifier_uris: fredtest02.com + + - name: delete ad application + azure_rm_adapplication: + tenant: "{{ tenant_id }}" + app_id: "{{ app_id }}" + state: absent +''' + +RETURN = ''' +output: + description: + - Current state of the adapplication. + type: complex + returned: awalys + contains: + display_name: + description: + - Object's display name or its prefix. + type: str + returned: always + sample: fredAKSCluster + app_id: + description: + - The application ID. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + object_id: + description: + - Object ID of the application + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + available_to_other_tenants: + description: + - The application can be used from any Azure AD tenants. + returned: always + type: bool + sample: false + homepage: + description: + - The url where users can sign in and use your app. + returned: always + type: str + sample: null + identifier_uris: + description: + - Space-separated unique URIs that Azure AD can use for this app. + returned: always + type: list + sample: [] + oauth2_allow_implicit_flow: + description: + - Whether to allow implicit grant flow for OAuth2. + returned: always + type: bool + sample: false + optional_claims: + description: + - The optional claims for the application. 
+ returned: always + type: list + sample: [] + reply_urls: + description: + - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request. + returned: always + type: list + sample: [] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException + import datetime + from dateutil.relativedelta import relativedelta + import dateutil.parser + from azure.graphrbac.models import ApplicationCreateParameters + import uuid + from azure.graphrbac.models import ResourceAccess + from azure.graphrbac.models import RequiredResourceAccess + from azure.graphrbac.models import AppRole + from azure.graphrbac.models import PasswordCredential, KeyCredential + from azure.graphrbac.models import ApplicationUpdateParameters +except ImportError: + # This is handled in azure_rm_common + pass + +app_role_spec = dict( + allowed_member_types=dict( + type='list', + elements='str', + required=True + ), + description=dict( + type='str' + ), + display_name=dict( + type='str' + ), + is_enabled=dict( + type='bool' + ), + value=dict( + type='str' + ) +) + +optional_claims_spec = dict( + name=dict( + type='str', + required=True + ), + source=dict( + type='str' + ), + essential=dict( + type='bool', + default=False + ), + additional_properties=dict( + type='str' + ) +) +required_resource_accesses_spec = dict( + resource_app_id=dict( + type='str' + ), + resource_access=dict( + type='list', + elements='dict', + options=dict( + id=dict( + type='str' + ), + type=dict( + type='str' + ) + ) + ) +) + + +class AzureRMADApplication(AzureRMModuleBaseExt): + def __init__(self): + + self.module_arg_spec = dict( + tenant=dict(type='str', required=True), + app_id=dict(type='str'), + display_name=dict(type='str'), + app_roles=dict(type='list', elements='dict', options=app_role_spec), + 
available_to_other_tenants=dict(type='bool'), + credential_description=dict(type='str'), + end_date=dict(type='str'), + homepage=dict(type='str'), + allow_guests_sign_in=dict(type='bool'), + identifier_uris=dict(type='list', elements='str'), + key_type=dict(type='str', default='AsymmetricX509Cert', choices=['AsymmetricX509Cert', 'Password', 'Symmetric']), + key_usage=dict(type='str', default='Verify', choices=['Sign', 'Verify']), + key_value=dict(type='str', no_log=True), + native_app=dict(type='bool'), + oauth2_allow_implicit_flow=dict(type='bool'), + optional_claims=dict(type='list', elements='dict', options=optional_claims_spec), + password=dict(type='str', no_log=True), + reply_urls=dict(type='list', elements='str'), + start_date=dict(type='str'), + required_resource_accesses=dict(type='list', elements='dict', options=required_resource_accesses_spec), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + self.state = None + self.tenant = None + self.app_id = None + self.display_name = None + self.app_roles = None + self.available_to_other_tenants = None + self.credential_description = None + self.end_date = None + self.homepage = None + self.identifier_uris = None + self.key_type = None + self.key_usage = None + self.key_value = None + self.native_app = None + self.oauth2_allow_implicit_flow = None + self.optional_claims = None + self.password = None + self.reply_urls = None + self.start_date = None + self.required_resource_accesses = None + self.allow_guests_sign_in = None + self.results = dict(changed=False) + + super(AzureRMADApplication, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + response = self.get_resource() + + if response: + if self.state == 'present': + if self.check_update(response): + 
self.update_resource(response) + elif self.state == 'absent': + self.delete_resource(response) + else: + if self.state == 'present': + self.create_resource() + elif self.state == 'absent': + self.log("try to delete non exist resource") + + return self.results + + def create_resource(self): + try: + key_creds, password_creds, required_accesses, app_roles, optional_claims = None, None, None, None, None + if self.native_app: + if self.identifier_uris: + self.fail("'identifier_uris' is not required for creating a native application") + else: + password_creds, key_creds = self.build_application_creds(self.password, self.key_value, self.key_type, self.key_usage, + self.start_date, self.end_date, self.credential_description) + if self.required_resource_accesses: + required_accesses = self.build_application_accesses(self.required_resource_accesses) + + if self.app_roles: + app_roles = self.build_app_roles(self.app_roles) + + client = self.get_graphrbac_client(self.tenant) + app_create_param = ApplicationCreateParameters(available_to_other_tenants=self.available_to_other_tenants, + display_name=self.display_name, + identifier_uris=self.identifier_uris, + homepage=self.homepage, + reply_urls=self.reply_urls, + key_credentials=key_creds, + password_credentials=password_creds, + oauth2_allow_implicit_flow=self.oauth2_allow_implicit_flow, + required_resource_access=required_accesses, + app_roles=app_roles, + allow_guests_sign_in=self.allow_guests_sign_in, + optional_claims=self.optional_claims) + response = client.applications.create(app_create_param) + self.results['changed'] = True + self.results.update(self.to_dict(response)) + return response + except GraphErrorException as ge: + self.fail("Error creating application, display_name {0} - {1}".format(self.display_name, str(ge))) + + def update_resource(self, old_response): + try: + client = self.get_graphrbac_client(self.tenant) + key_creds, password_creds, required_accesses, app_roles, optional_claims = None, None, None, 
None, None + if self.native_app: + if self.identifier_uris: + self.fail("'identifier_uris' is not required for creating a native application") + else: + password_creds, key_creds = self.build_application_creds(self.password, self.key_value, self.key_type, self.key_usage, + self.start_date, self.end_date, self.credential_description) + if self.required_resource_accesses: + required_accesses = self.build_application_accesses(self.required_resource_accesses) + + if self.app_roles: + app_roles = self.build_app_roles(self.app_roles) + app_update_param = ApplicationUpdateParameters(available_to_other_tenants=self.available_to_other_tenants, + display_name=self.display_name, + identifier_uris=self.identifier_uris, + homepage=self.homepage, + reply_urls=self.reply_urls, + key_credentials=key_creds, + password_credentials=password_creds, + oauth2_allow_implicit_flow=self.oauth2_allow_implicit_flow, + required_resource_access=required_accesses, + allow_guests_sign_in=self.allow_guests_sign_in, + app_roles=app_roles, + optional_claims=self.optional_claims) + client.applications.patch(old_response['object_id'], app_update_param) + self.results['changed'] = True + self.results.update(self.get_resource()) + + except GraphErrorException as ge: + self.fail("Error updating the application app_id {0} - {1}".format(self.app_id, str(ge))) + + def delete_resource(self, response): + try: + client = self.get_graphrbac_client(self.tenant) + client.applications.delete(response.get('object_id')) + self.results['changed'] = True + return True + except GraphErrorException as ge: + self.fail("Error deleting application app_id {0} display_name {1} - {2}".format(self.app_id, self.display_name, str(ge))) + + def get_resource(self): + try: + client = self.get_graphrbac_client(self.tenant) + existing_apps = [] + if self.app_id: + existing_apps = list(client.applications.list(filter="appId eq '{0}'".format(self.app_id))) + if not existing_apps: + return False + result = existing_apps[0] + return 
self.to_dict(result) + except GraphErrorException as ge: + self.log("Did not find the graph instance instance {0} - {1}".format(self.app_id, str(ge))) + return False + + def check_update(self, response): + for key in list(self.module_arg_spec.keys()): + attr = getattr(self, key) + if attr and key in response: + if (response and attr != response[key]) or response[key] is None: + return True + return False + + def to_dict(self, object): + app_roles = [{ + 'id': app_role.id, + 'display_name': app_role.display_name, + 'is_enabled': app_role.is_enabled, + 'value': app_role.value, + "description": app_role.description + }for app_role in object.app_roles] + return dict( + app_id=object.app_id, + object_id=object.object_id, + display_name=object.display_name, + app_roles=app_roles, + available_to_other_tenants=object.available_to_other_tenants, + homepage=object.homepage, + identifier_uris=object.identifier_uris, + oauth2_allow_implicit_flow=object.oauth2_allow_implicit_flow, + optional_claims=object.optional_claims, + allow_guests_sign_in=object.allow_guests_sign_in, + reply_urls=object.reply_urls + ) + + def build_application_creds(self, password=None, key_value=None, key_type=None, key_usage=None, + start_date=None, end_date=None, key_description=None): + if password and key_value: + self.fail('specify either password or key_value, but not both.') + + if not start_date: + start_date = datetime.datetime.utcnow() + elif isinstance(start_date, str): + start_date = dateutil.parser.parse(start_date) + + if not end_date: + end_date = start_date + relativedelta(years=1) - relativedelta(hours=24) + elif isinstance(end_date, str): + end_date = dateutil.parser.parse(end_date) + + custom_key_id = None + if key_description and password: + custom_key_id = self.encode_custom_key_description(key_description) + + key_type = key_type or 'AsymmetricX509Cert' + key_usage = key_usage or 'Verify' + + password_creds = None + key_creds = None + if password: + password_creds = 
[PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(self.gen_guid()), + value=password, custom_key_identifier=custom_key_id)] + elif key_value: + key_creds = [ + KeyCredential(start_date=start_date, end_date=end_date, key_id=str(self.gen_guid()), value=key_value, + usage=key_usage, type=key_type, custom_key_identifier=custom_key_id)] + + return (password_creds, key_creds) + + def encode_custom_key_description(self, key_description): + # utf16 is used by AAD portal. Do not change it to other random encoding + # unless you know what you are doing. + return key_description.encode('utf-16') + + def gen_guid(self): + return uuid.uuid4() + + def build_application_accesses(self, required_resource_accesses): + if not required_resource_accesses: + return None + required_accesses = [] + if isinstance(required_resource_accesses, dict): + self.log('Getting "requiredResourceAccess" from a full manifest') + required_resource_accesses = required_resource_accesses.get('required_resource_access', []) + for x in required_resource_accesses: + + accesses = [ResourceAccess(id=y['id'], type=y['type']) for y in x['resource_access']] + required_accesses.append(RequiredResourceAccess(resource_app_id=x['resource_app_id'], + resource_access=accesses)) + return required_accesses + + def build_app_roles(self, app_roles): + if not app_roles: + return None + result = [] + if isinstance(app_roles, dict): + self.log('Getting "appRoles" from a full manifest') + app_roles = app_roles.get('appRoles', []) + for x in app_roles: + role = AppRole(id=x.get('id', None) or self.gen_guid(), + allowed_member_types=x.get('allowed_member_types', None), + description=x.get('description', None), display_name=x.get('display_name', None), + is_enabled=x.get('is_enabled', None), value=x.get('value', None)) + result.append(role) + return result + + +def main(): + AzureRMADApplication() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
new file mode 100644
index 000000000..becfdd0b3
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2020 Guopeng Lin,
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: azure_rm_adapplication_info
+
+version_added: "1.6.0"
+
+short_description: Get Azure Active Directory application info
+
+description:
+ - Get Azure Active Directory application info.
+
+options:
+ app_id:
+ description:
+ - The application ID.
+ type: str
+ tenant:
+ description:
+ - The tenant ID.
+ type: str
+ required: True
+ object_id:
+ description:
+ - The application's object ID.
+ type: str
+ identifier_uri:
+ description:
+ - The identifier URI of the application.
+ type: str
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ haiyuan_zhang (@haiyuazhang)
+ Fred-sun (@Fred-sun)
+ guopeng_lin (@guopenglin)
+'''
+
+EXAMPLES = '''
+ - name: get ad app info by App ID
+ azure_rm_adapplication_info:
+ app_id: "{{ app_id }}"
+ tenant: "{{ tenant_id }}"
+
+ - name: get ad app info ---- by object ID
+ azure_rm_adapplication_info:
+ object_id: "{{ object_id }}"
+ tenant: "{{ tenant_id }}"
+
+ - name: get ad app info ---- by identifier uri
+ azure_rm_adapplication_info:
+ identifier_uri: "{{ identifier_uri }}"
+ tenant: "{{ tenant_id }}"
+
+'''
+
+RETURN = '''
+applications:
+ description:
+ - The info of the ad application.
+ type: complex
+ returned: always
+ contains:
+ app_display_name:
+ description:
+ - Object's display name or its prefix.
+ type: str + returned: always + sample: app + app_id: + description: + - The application ID. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + identifier_uris: + description: + - The identifiers_uri list of app. + type: list + returned: always + sample: ["http://ansible-atodorov"] + object_id: + description: + - It's application's object ID. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADApplicationInfo(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + app_id=dict( + type='str' + ), + object_id=dict( + type='str' + ), + identifier_uri=dict( + type='str' + ), + tenant=dict( + type='str', + required=True + ) + ) + self.tenant = None + self.app_id = None + self.object_id = None + self.identifier_uri = None + self.results = dict(changed=False) + super(AzureRMADApplicationInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + is_ad_resource=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + applications = [] + try: + client = self.get_graphrbac_client(self.tenant) + if self.object_id: + applications = [client.applications.get(self.object_id)] + else: + sub_filters = [] + if self.identifier_uri: + sub_filters.append("identifierUris/any(s:s eq '{0}')".format(self.identifier_uri)) + if self.app_id: + sub_filters.append("appId eq '{0}'".format(self.app_id)) + # applications = client.applications.list(filter=(' and '.join(sub_filters))) + applications = list(client.applications.list(filter=(' and '.join(sub_filters)))) + + 
self.results['applications'] = [self.to_dict(app) for app in applications] + except GraphErrorException as ge: + self.fail("failed to get application info {0}".format(str(ge))) + + return self.results + + def to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + app_display_name=object.display_name, + identifier_uris=object.identifier_uris + ) + + +def main(): + AzureRMADApplicationInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py new file mode 100644 index 000000000..092ed8c78 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py @@ -0,0 +1,443 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Cole Neubauer, (@coleneubauer) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: azure_rm_adgroup +version_added: "1.6.0" +short_description: Manage Azure Active Directory group +description: + - Create, update or delete Azure Active Directory group. +options: + tenant: + description: + - The tenant ID. + type: str + required: True + state: + description: + - Assert the state of the resource group. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + type: str + object_id: + description: + - The object id for the ad group. + - Can be used to reference when updating an existing group. + - Ignored when attempting to create a group. + type: str + display_name: + description: + - The display name of the ad group. + - Can be used with I(mail_nickname) instead of I(object_id) to reference existing group. + - Required when creating a new ad group. 
+ type: str + mail_nickname: + description: + - The mail nickname of the ad group. + - Can be used with I(display_name) instead of I(object_id) to reference existing group. + - Required when creating a new ad group. + type: str + present_members: + description: + - The azure ad objects asserted to be members of the group. + - This list does not need to be all inclusive. Objects that are members and not on this list remain members. + type: list + elements: str + absent_members: + description: + - The azure ad objects asserted to not be members of the group. + type: list + elements: str + present_owners: + description: + - The azure ad objects asserted to be owners of the group. + - This list does not need to be all inclusive. Objects that are owners and not on this list remain members. + type: list + elements: str + absent_owners: + description: + - The azure ad objects asserted to not be owners of the group. + type: list + elements: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Cole Neubauer(@coleneubauer) +''' + +EXAMPLES = ''' + - name: Create Group + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + display_name: "Group-Name" + mail_nickname: "Group-Mail-Nickname" + state: 'present' + + - name: Delete Group using display_name and mail_nickname + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + display_name: "Group-Name" + mail_nickname: "Group-Mail-Nickname" + state: 'absent' + + - name: Delete Group using object_id + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: 'absent' + + - name: Ensure Users are Members of a Group using display_name and mail_nickname + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + display_name: "Group-Name" + mail_nickname: "Group-Mail-Nickname" + state: 'present' + present_members: + - "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ ad_object_1_object_id }}" + 
- "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ ad_object_2_object_id }}" + + - name: Ensure Users are Members of a Group using object_id + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: 'present' + present_members: + - "https://graph.windows.net/{{ ad_object_1_tenant_id }}/directoryObjects/{{ ad_object_1_object_id }}" + - "https://graph.windows.net/{{ ad_object_2_tenant_id }}/directoryObjects/{{ ad_object_2_object_id }}" + + - name: Ensure Users are not Members of a Group using display_name and mail_nickname + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + display_name: "Group-Name" + mail_nickname: "Group-Mail-Nickname" + state: 'present' + absent_members: + - "{{ ad_object_1_object_id }}" + + - name: Ensure Users are Members of a Group using object_id + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: 'present' + absent_members: + - "{{ ad_object_1_object_id }}" + + - name: Ensure Users are Owners of a Group using display_name and mail_nickname + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + display_name: "Group-Name" + mail_nickname: "Group-Mail-Nickname" + state: 'present' + present_owners: + - "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ ad_object_1_object_id }}" + - "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ ad_object_2_object_id }}" + + - name: Ensure Users are Owners of a Group using object_id + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: 'present' + present_owners: + - "https://graph.windows.net/{{ ad_object_1_tenant_id }}/directoryObjects/{{ ad_object_1_object_id }}" + - "https://graph.windows.net/{{ ad_object_2_tenant_id }}/directoryObjects/{{ ad_object_2_object_id }}" + + - name: Ensure Users are not Owners of a Group using 
display_name and mail_nickname + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + display_name: "Group-Name" + mail_nickname: "Group-Mail-Nickname" + state: 'present' + absent_owners: + - "{{ ad_object_1_object_id }}" + - "{{ ad_object_2_object_id }}" + + - name: Ensure Users are Owners of a Group using object_id + azure_rm_adgroup: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: 'present' + absent_owners: + - "{{ ad_object_1_object_id }}" + - "{{ ad_object_2_object_id }}" + +''' + +RETURN = ''' +object_id: + description: + - The object_id for the group. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +display_name: + description: + - The display name of the group. + returned: always + type: str + sample: GroupName +mail_nickname: + description: + - The mail alias for the group. + returned: always + type: str + sample: groupname +mail_enabled: + description: + - Whether the group is mail-enabled. Must be false. This is because only pure security groups can be created using the Graph API. + returned: always + type: bool + sample: False +security_enabled: + description: + - Whether the group is security-enable. + returned: always + type: bool + sample: False +mail: + description: + - The primary email address of the group. + returned: always + type: str + sample: group@contoso.com +group_owners: + description: + - The owners of the group. + returned: always + type: list +group_members: + description: + - The members of the group. 
+ returned: always + type: list +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException + from azure.graphrbac.models import GroupCreateParameters +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADGroup(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + object_id=dict(type='str'), + display_name=dict(type='str'), + mail_nickname=dict(type='str'), + present_members=dict(type='list', elements='str'), + present_owners=dict(type='list', elements='str'), + absent_members=dict(type='list', elements='str'), + absent_owners=dict(type='list', elements='str'), + tenant=dict(type='str', required=True), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + ) + + self.tenant = None + self.display_name = None + self.mail_nickname = None + self.object_id = None + self.present_members = [] + self.present_owners = [] + self.absent_members = [] + self.absent_owners = [] + self.state = None + self.results = dict(changed=False) + + super(AzureRMADGroup, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + # TODO remove ad_groups return. 
Returns as one object always + ad_groups = [] + + try: + client = self.get_graphrbac_client(self.tenant) + ad_groups = [] + + if self.display_name and self.mail_nickname: + ad_groups = list(client.groups.list(filter="displayName eq '{0}' and mailNickname eq '{1}'".format(self.display_name, self.mail_nickname))) + + if ad_groups: + self.object_id = ad_groups[0].object_id + + elif self.object_id: + ad_groups = [client.groups.get(self.object_id)] + + if ad_groups: + if self.state == "present": + self.results["changed"] = False + elif self.state == "absent": + ad_groups = [client.groups.delete(self.object_id)] + self.results["changed"] = True + else: + if self.state == "present": + if self.display_name and self.mail_nickname: + ad_groups = [client.groups.create(GroupCreateParameters(display_name=self.display_name, mail_nickname=self.mail_nickname))] + self.results["changed"] = True + else: + raise ValueError('The group does not exist. Both display_name : {0} and mail_nickname : {1} must be passed to create a new group' + .format(self.display_name, self.mail_nickname)) + elif self.state == "absent": + self.results["changed"] = False + + if ad_groups[0] is not None: + self.update_members(ad_groups[0].object_id, client) + self.update_owners(ad_groups[0].object_id, client) + self.results.update(self.set_results(ad_groups[0], client)) + + except GraphErrorException as e: + self.fail(e) + except ValueError as e: + self.fail(e) + + return self.results + + def update_members(self, group_id, client): + + current_members = [] + + if self.present_members or self.absent_members: + current_members = [object.object_id for object in list(client.groups.get_group_members(group_id))] + + if self.present_members: + present_members_by_object_id = self.dictionary_from_object_urls(self.present_members) + + members_to_add = list(set(present_members_by_object_id.keys()) - set(current_members)) + + if members_to_add: + for member_object_id in members_to_add: + 
client.groups.add_member(group_id, present_members_by_object_id[member_object_id]) + + self.results["changed"] = True + + if self.absent_members: + members_to_remove = list(set(self.absent_members).intersection(set(current_members))) + + if members_to_remove: + for member in members_to_remove: + client.groups.remove_member(group_id, member) + self.results["changed"] = True + + def update_owners(self, group_id, client): + current_owners = [] + + if self.present_owners or self.absent_owners: + current_owners = [object.object_id for object in list(client.groups.list_owners(group_id))] + + if self.present_owners: + + present_owners_by_object_id = self.dictionary_from_object_urls(self.present_owners) + + owners_to_add = list(set(present_owners_by_object_id.keys()) - set(current_owners)) + + if owners_to_add: + for owner_object_id in owners_to_add: + client.groups.add_owner(group_id, present_owners_by_object_id[owner_object_id]) + self.results["changed"] = True + + if self.absent_owners: + owners_to_remove = list(set(self.absent_owners).intersection(set(current_owners))) + + if owners_to_remove: + for owner in owners_to_remove: + client.groups.remove_owner(group_id, owner) + self.results["changed"] = True + + def dictionary_from_object_urls(self, object_urls): + objects_by_object_id = {} + + for urls in object_urls: + object_id = urls.split("/")[-1] + objects_by_object_id[object_id] = urls + + return objects_by_object_id + + def application_to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + display_name=object.display_name, + ) + + def serviceprincipal_to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + app_display_name=object.display_name, + app_role_assignment_required=object.app_role_assignment_required + ) + + def group_to_dict(self, object): + return dict( + object_id=object.object_id, + display_name=object.display_name, + mail_nickname=object.mail_nickname, + 
mail_enabled=object.mail_enabled, + security_enabled=object.security_enabled, + mail=object.mail + ) + + def user_to_dict(self, object): + return dict( + object_id=object.object_id, + display_name=object.display_name, + user_principal_name=object.user_principal_name, + mail_nickname=object.mail_nickname, + mail=object.mail, + account_enabled=object.account_enabled, + user_type=object.user_type + ) + + def result_to_dict(self, object): + if object.object_type == "Group": + return self.group_to_dict(object) + elif object.object_type == "User": + return self.user_to_dict(object) + elif object.object_type == "Application": + return self.application_to_dict(object) + elif object.object_type == "ServicePrincipal": + return self.serviceprincipal_to_dict(object) + else: + return object.object_type + + def set_results(self, object, client): + results = self.group_to_dict(object) + + if results["object_id"] and (self.present_owners or self.absent_owners): + results["group_owners"] = [self.result_to_dict(object) for object in list(client.groups.list_owners(results["object_id"]))] + + if results["object_id"] and (self.present_members or self.absent_members): + results["group_members"] = [self.result_to_dict(object) for object in list(client.groups.get_group_members(results["object_id"]))] + + return results + + +def main(): + AzureRMADGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py new file mode 100644 index 000000000..9e8c5e456 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py @@ -0,0 +1,323 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Cole Neubauer, (@coleneubauer) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + 
+DOCUMENTATION = '''
+module: azure_rm_adgroup_info
+version_added: "1.6.0"
+short_description: Get Azure Active Directory group info
+description:
+ - Get Azure Active Directory group info.
+options:
+ tenant:
+ description:
+ - The tenant ID.
+ type: str
+ required: True
+ object_id:
+ description:
+ - The object id for the ad group.
+ - returns the group which has this object ID.
+ type: str
+ attribute_name:
+ description:
+ - The name of an attribute that you want to match to I(attribute_value).
+ - If I(attribute_name) is not a collection type it will return groups where I(attribute_name) is equal to I(attribute_value).
+ - If I(attribute_name) is a collection type it will return groups where I(attribute_value) is in I(attribute_name).
+ type: str
+ attribute_value:
+ description:
+ - The value to match attribute_name to.
+ - If I(attribute_name) is not a collection type it will return groups where I(attribute_name) is equal to I(attribute_value).
+ - If I(attribute_name) is a collection type it will return groups where I(attribute_value) is in I(attribute_name).
+ type: str
+ odata_filter:
+ description:
+ - returns groups based on the OData filter passed into this parameter.
+ type: str
+ check_membership:
+ description:
+ - The object ID of the contact, group, user, or service principal to check for membership against returned groups.
+ type: str
+ return_owners:
+ description:
+ - Indicate whether the owners of a group should be returned with the returned groups.
+ default: False
+ type: bool
+ return_group_members:
+ description:
+ - Indicate whether the members of a group should be returned with the returned groups.
+ default: False
+ type: bool
+ return_member_groups:
+ description:
+ - Indicate whether the groups in which a group is a member should be returned with the returned groups.
+ default: False
+ type: bool
+ all:
+ description:
+ - If True, will return all groups in tenant.
+ - If False will return no groups.
+ - It is recommended that you instead identify a subset of groups and use filter. + default: False + type: bool +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Cole Neubauer(@coleneubauer) +''' + +EXAMPLES = ''' + - name: Return a specific group using object_id + azure_rm_adgroup_info: + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Return a specific group using object_id and return the owners of the group + azure_rm_adgroup_info: + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + return_owners: True + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Return a specific group using object_id and return the owners and members of the group + azure_rm_adgroup_info: + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + return_owners: True + return_group_members: True + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Return a specific group using object_id and return the groups the group is a member of + azure_rm_adgroup_info: + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + return_member_groups: True + tenant: "{{ tenant_id }}" + + - name: Return a specific group using object_id and check an ID for membership + azure_rm_adgroup_info: + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + check_membership: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Return a specific group using displayName for attribute_name + azure_rm_adgroup_info: + attribute_name: "displayName" + attribute_value: "Display-Name-Of-AD-Group" + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Return groups matching odata_filter + azure_rm_adgroup_info: + odata_filter: "mailNickname eq 'Mail-Nickname-Of-AD-Group'" + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Return all groups + azure_rm_adgroup_info: + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + all: True + +''' + +RETURN = ''' +object_id: + description: + - The object_id for 
the group. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +display_name: + description: + - The display name of the group. + returned: always + type: str + sample: GroupName +mail_nickname: + description: + - The mail alias for the group. + returned: always + type: str + sample: groupname +mail_enabled: + description: + - Whether the group is mail-enabled. Must be false. This is because only pure security groups can be created using the Graph API. + returned: always + type: bool + sample: False +security_enabled: + description: + - Whether the group is security-enable. + returned: always + type: bool + sample: False +mail: + description: + - The primary email address of the group. + returned: always + type: str + sample: group@contoso.com +group_owners: + description: + - The owners of the group. + returned: always + type: list +group_members: + description: + - The members of the group. + returned: always + type: list +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException + from azure.graphrbac.models import CheckGroupMembershipParameters +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADGroupInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + object_id=dict(type='str'), + attribute_name=dict(type='str'), + attribute_value=dict(type='str'), + odata_filter=dict(type='str'), + check_membership=dict(type='str'), + return_owners=dict(type='bool', default=False), + return_group_members=dict(type='bool', default=False), + return_member_groups=dict(type='bool', default=False), + all=dict(type='bool', default=False), + tenant=dict(type='str', required=True), + ) + + self.tenant = None + self.object_id = None + self.attribute_name = None + self.attribute_value = None + self.odata_filter = None + 
self.check_membership = None + self.return_owners = False + self.return_group_members = False + self.return_member_groups = False + self.all = False + + self.results = dict(changed=False) + + mutually_exclusive = [['odata_filter', 'attribute_name', 'object_id', 'all']] + required_together = [['attribute_name', 'attribute_value']] + required_one_of = [['odata_filter', 'attribute_name', 'object_id', 'all']] + + super(AzureRMADGroupInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + ad_groups = [] + + try: + client = self.get_graphrbac_client(self.tenant) + + if self.object_id is not None: + ad_groups = [client.groups.get(self.object_id)] + elif self.attribute_name is not None and self.attribute_value is not None: + ad_groups = list(client.groups.list(filter="{0} eq '{1}'".format(self.attribute_name, self.attribute_value))) + elif self.odata_filter is not None: # run a filter based on user input + ad_groups = list(client.groups.list(filter=self.odata_filter)) + elif self.all: + ad_groups = list(client.groups.list()) + + self.results['ad_groups'] = [self.set_results(group, client) for group in ad_groups] + + except GraphErrorException as e: + self.fail("failed to get ad group info {0}".format(str(e))) + + return self.results + + def application_to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + display_name=object.display_name, + ) + + def serviceprincipal_to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + app_display_name=object.display_name, + app_role_assignment_required=object.app_role_assignment_required + ) + + def group_to_dict(self, object): + return dict( + 
object_id=object.object_id, + display_name=object.display_name, + mail_nickname=object.mail_nickname, + mail_enabled=object.mail_enabled, + security_enabled=object.security_enabled, + mail=object.mail + ) + + def user_to_dict(self, object): + return dict( + object_id=object.object_id, + display_name=object.display_name, + user_principal_name=object.user_principal_name, + mail_nickname=object.mail_nickname, + mail=object.mail, + account_enabled=object.account_enabled, + user_type=object.user_type + ) + + def result_to_dict(self, object): + if object.object_type == "Group": + return self.group_to_dict(object) + elif object.object_type == "User": + return self.user_to_dict(object) + elif object.object_type == "Application": + return self.application_to_dict(object) + elif object.object_type == "ServicePrincipal": + return self.serviceprincipal_to_dict(object) + else: + return object.object_type + + def set_results(self, object, client): + results = self.group_to_dict(object) + + if results["object_id"] and self.return_owners: + results["group_owners"] = [self.result_to_dict(object) for object in list(client.groups.list_owners(results["object_id"]))] + + if results["object_id"] and self.return_group_members: + results["group_members"] = [self.result_to_dict(object) for object in list(client.groups.get_group_members(results["object_id"]))] + + if results["object_id"] and self.return_member_groups: + results["member_groups"] = [self.result_to_dict(object) for object in list(client.groups.get_member_groups(results["object_id"], False))] + + if results["object_id"] and self.check_membership: + results["is_member_of"] = client.groups.is_member_of( + CheckGroupMembershipParameters(group_id=results["object_id"], member_id=self.check_membership)).value + + return results + + +def main(): + AzureRMADGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword.py new file mode 100644 index 000000000..d56791864 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Haiyuan Zhang, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +import datetime + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_adpassword + +version_added: "0.2.0" + +short_description: Manage application password + +description: + - Manage application password. + +options: + app_id: + description: + - The application ID. + type: str + service_principal_object_id: + description: + - The service principal object ID. + type: str + key_id: + description: + - The password key ID. + type: str + tenant: + description: + - The tenant ID. + type: str + required: True + end_date: + description: + - Date or datemtime after which credentials expire. + - Default value is one year after current time. + type: str + value: + description: + - The application password value. + - Length greater than 18 characters. + type: str + app_object_id: + description: + - The application object ID. + type: str + state: + description: + - Assert the state of Active Dirctory Password. + - Use C(present) to create or update a Password and use C(absent) to delete. + - Update is not supported, if I(state=absent) and I(key_id=None), then all passwords of the application will be deleted. 
+ default: present + choices: + - absent + - present + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + haiyuan_zhang (@haiyuazhang) + Fred-sun (@Fred-sun) + +''' + +EXAMPLES = ''' + - name: create ad password + azure_rm_adpassword: + app_id: "{{ app_id }}" + state: present + value: "$abc12345678" + tenant: "{{ tenant_id }}" +''' + +RETURN = ''' +end_date: + description: + - Date or datemtime after which credentials expire. + - Default value is one year after current time. + type: str + returned: always + sample: 2021-06-28T06:00:32.637070+00:00 +key_id: + description: + - The password key ID + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +start_date: + description: + - Date or datetime at which credentials become valid. + - Default value is current time. + type: str + returned: always + sample: 2020-06-28T06:00:32.637070+00:00 + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +import uuid + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException + from azure.graphrbac.models import PasswordCredential + from azure.graphrbac.models import ApplicationUpdateParameters + from dateutil.relativedelta import relativedelta +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADPassword(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + app_id=dict(type='str'), + service_principal_object_id=dict(type='str'), + app_object_id=dict(type='str'), + key_id=dict(type='str'), + tenant=dict(type='str', required=True), + value=dict(type='str'), + end_date=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + self.state = None + self.tenant = None + self.app_id = None + self.service_principal_object_id = None + self.app_object_id = None + self.key_id = 
None + self.value = None + self.end_date = None + self.results = dict(changed=False) + + self.client = None + + super(AzureRMADPassword, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False, + is_ad_resource=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + self.client = self.get_graphrbac_client(self.tenant) + self.resolve_app_obj_id() + passwords = self.get_all_passwords() + + if self.state == 'present': + if self.key_id and self.key_exists(passwords): + self.update(passwords) + else: + self.create_password(passwords) + else: + if self.key_id is None: + self.delete_all_passwords(passwords) + else: + self.delete_password(passwords) + + return self.results + + def key_exists(self, old_passwords): + for pd in old_passwords: + if pd.key_id == self.key_id: + return True + return False + + def resolve_app_obj_id(self): + try: + if self.app_object_id is not None: + return + elif self.app_id or self.service_principal_object_id: + if not self.app_id: + sp = self.client.service_principals.get(self.service_principal_object_id) + self.app_id = sp.app_id + if not self.app_id: + self.fail("can't resolve app via service principal object id {0}".format(self.service_principal_object_id)) + + result = list(self.client.applications.list(filter="appId eq '{0}'".format(self.app_id))) + if result: + self.app_object_id = result[0].object_id + else: + self.fail("can't resolve app via app id {0}".format(self.app_id)) + else: + self.fail("one of the [app_id, app_object_id, service_principal_id] must be set") + + except GraphErrorException as ge: + self.fail("error in resolve app_object_id {0}".format(str(ge))) + + def get_all_passwords(self): + + try: + return list(self.client.applications.list_password_credentials(self.app_object_id)) + except GraphErrorException as ge: + self.fail("failed to fetch passwords for app {0}: {1}".format(self.app_object_id, 
str(ge))) + + def delete_all_passwords(self, old_passwords): + + if len(old_passwords) == 0: + self.results['changed'] = False + return + try: + self.client.applications.patch(self.app_object_id, ApplicationUpdateParameters(password_credentials=[])) + self.results['changed'] = True + except GraphErrorException as ge: + self.fail("fail to purge all passwords for app: {0} - {1}".format(self.app_object_id, str(ge))) + + def delete_password(self, old_passwords): + if not self.key_exists(old_passwords): + self.results['changed'] = False + return + + num_of_passwords_before_delete = len(old_passwords) + + for pd in old_passwords: + if pd.key_id == self.key_id: + old_passwords.remove(pd) + break + try: + self.client.applications.patch(self.app_object_id, ApplicationUpdateParameters(password_credentials=old_passwords)) + num_of_passwords_after_delete = len(self.get_all_passwords()) + if num_of_passwords_after_delete != num_of_passwords_before_delete: + self.results['changed'] = True + + except GraphErrorException as ge: + self.fail("failed to delete password with key id {0} - {1}".format(self.app_id, str(ge))) + + def create_password(self, old_passwords): + + def gen_guid(): + return uuid.uuid4() + + if self.value is None: + self.fail("when creating a new password, module parameter value can't be None") + + start_date = datetime.datetime.now(datetime.timezone.utc) + end_date = self.end_date or start_date + relativedelta(years=1) + value = self.value + key_id = self.key_id or str(gen_guid()) + + new_password = PasswordCredential(start_date=start_date, end_date=end_date, key_id=key_id, + value=value, custom_key_identifier=None) + old_passwords.append(new_password) + + try: + client = self.get_graphrbac_client(self.tenant) + app_patch_parameters = ApplicationUpdateParameters(password_credentials=old_passwords) + client.applications.patch(self.app_object_id, app_patch_parameters) + + new_passwords = self.get_all_passwords() + for pd in new_passwords: + if pd.key_id == key_id: 
+ self.results['changed'] = True + self.results.update(self.to_dict(pd)) + except GraphErrorException as ge: + self.fail("failed to create new password: {0}".format(str(ge))) + + def update_password(self, old_passwords): + self.fail("update existing password is not supported") + + def to_dict(self, pd): + return dict( + end_date=pd.end_date, + start_date=pd.start_date, + key_id=pd.key_id + ) + + +def main(): + AzureRMADPassword() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword_info.py new file mode 100644 index 000000000..8fc99cb7e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adpassword_info.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Haiyuan Zhang, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +import datetime + +__metaclass__ = type + + +DOCUMENTATION = ''' +module: azure_rm_adpassword_info + +version_added: "0.2.0" + +short_description: Get application password info + +description: + - Get application password info. + +options: + app_id: + description: + - The application ID. + type: str + service_principal_object_id: + description: + - The service principal object ID. + type: str + key_id: + description: + - The password key ID. + type: str + tenant: + description: + - The tenant ID. + type: str + required: True + end_date: + description: + - Date or datemtime after which credentials expire. + - Default value is one year after current time. + type: str + value: + description: + - The application password value. + - Length greater than 18 characters. + type: str + app_object_id: + description: + - The application object ID. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + haiyuan_zhang (@haiyuazhang) + Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: get ad password info + azure_rm_adpassword_info: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + key_id: "{{ key_id }}" +''' + +RETURN = ''' +passwords: + description: + - The password info. + returned: success + type: dict + contains: + custom_key_identifier: + description: + - Custom key identifier. + type: str + returned: always + sample: None + end_date: + description: + - Date or datemtime after which credentials expire. + - Default value is one year after current time. + type: datetime + returned: always + sample: 2021-06-18T06:51:25.508304+00:00 + key_id: + description: + - The password key ID. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + start_date: + description: + - Date or datetime at which credentials become valid. + - Default value is current time + type: datetime + returned: always + sample: 2020-06-18T06:51:25.508304+00:00 + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException + from azure.graphrbac.models import PasswordCredential + from azure.graphrbac.models import ApplicationUpdateParameters +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADPasswordInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + app_id=dict(type='str'), + app_object_id=dict(type='str'), + service_principal_object_id=dict(type='str'), + key_id=dict(type='str'), + tenant=dict(type='str', required=True), + value=dict(type='str'), + end_date=dict(type='str'), + ) + + self.tenant = None + self.app_id = None + self.service_principal_object_id = None + self.app_object_id = None + self.key_id = None + self.value = None + 
self.end_date = None + self.results = dict(changed=False) + + self.client = None + + super(AzureRMADPasswordInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_tags=False, + supports_check_mode=True, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + self.client = self.get_graphrbac_client(self.tenant) + self.resolve_app_obj_id() + passwords = self.get_all_passwords() + + if self.key_id: + filtered = [pd for pd in passwords if pd.key_id == self.key_id] + self.results['passwords'] = [self.to_dict(pd) for pd in filtered] + else: + self.results['passwords'] = [self.to_dict(pd) for pd in passwords] + + return self.results + + def resolve_app_obj_id(self): + try: + if self.app_object_id is not None: + return + elif self.app_id or self.service_principal_object_id: + if not self.app_id: + sp = self.client.service_principals.get(self.service_principal_id) + self.app_id = sp.app_id + if not self.app_id: + self.fail("can't resolve app via service principal object id {0}".format(self.service_principal_object_id)) + + result = list(self.client.applications.list(filter="appId eq '{0}'".format(self.app_id))) + if result: + self.app_object_id = result[0].object_id + else: + self.fail("can't resolve app via app id {0}".format(self.app_id)) + else: + self.fail("one of the [app_id, app_object_id, service_principal_id] must be set") + + except GraphErrorException as ge: + self.fail("error in resolve app_object_id {0}".format(str(ge))) + + def get_all_passwords(self): + + try: + return list(self.client.applications.list_password_credentials(self.app_object_id)) + except GraphErrorException as ge: + self.fail("failed to fetch passwords for app {0}: {1}".format(self.app_object_id, str(ge))) + + def to_dict(self, pd): + return dict( + end_date=pd.end_date, + start_date=pd.start_date, + key_id=pd.key_id, + custom_key_identifier=str(pd.custom_key_identifier) + ) + + +def 
main(): + AzureRMADPasswordInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal.py new file mode 100644 index 000000000..ca5586e48 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Haiyuan Zhang, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_adserviceprincipal + +version_added: "0.2.0" + +short_description: Manage Azure Active Directory service principal + +description: + - Manage Azure Active Directory service principal. + +options: + app_id: + description: + - The application ID. + type: str + required: True + tenant: + description: + - The tenant ID. + type: str + required: True + app_role_assignment_required: + description: + - Whether the Role of the Service Principal is set. + type: bool + state: + description: + - Assert the state of Active Dirctory service principal. + - Use C(present) to create or update a Password and use C(absent) to delete. + default: present + choices: + - absent + - present + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + haiyuan_zhang (@haiyuazhang) + Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: create ad sp + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + state: present + tenant: "{{ tenant_id }}" +''' + +RETURN = ''' +app_display_name: + description: + - Object's display name or its prefix. + type: str + returned: always + sample: fredAKSCluster +app_id: + description: + - The application ID. 
+ returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +app_role_assignment_required: + description: + - Whether the Role of the Service Principal is set. + returned: always + type: bool + sample: false +object_id: + description: + - Object ID of the associated service principal. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from azure.graphrbac.models import ServicePrincipalCreateParameters + from azure.graphrbac.models import ServicePrincipalUpdateParameters +except Exception: + pass + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADServicePrincipal(AzureRMModuleBaseExt): + def __init__(self): + + self.module_arg_spec = dict( + app_id=dict(type='str', required=True), + tenant=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + app_role_assignment_required=dict(type='bool') + ) + + self.state = None + self.tenant = None + self.app_id = None + self.app_role_assignment_required = None + self.object_id = None + self.results = dict(changed=False) + + super(AzureRMADServicePrincipal, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + response = self.get_resource() + + if response: + if self.state == 'present': + if self.check_update(response): + self.update_resource(response) + elif self.state == 'absent': + self.delete_resource(response) + else: + if self.state == 'present': + self.create_resource() + elif self.state == 'absent': + self.log("try to delete non exist resource") 
+ + return self.results + + def create_resource(self): + try: + client = self.get_graphrbac_client(self.tenant) + response = client.service_principals.create(ServicePrincipalCreateParameters(app_id=self.app_id, account_enabled=True)) + self.results['changed'] = True + self.results.update(self.to_dict(response)) + return response + except GraphErrorException as ge: + self.fail("Error creating service principle, app id {0} - {1}".format(self.app_id, str(ge))) + + def update_resource(self, old_response): + try: + client = self.get_graphrbac_client(self.tenant) + to_update = {} + if self.app_role_assignment_required is not None: + to_update['app_role_assignment_required'] = self.app_role_assignment_required + + client.service_principals.update(old_response['object_id'], to_update) + self.results['changed'] = True + self.results.update(self.get_resource()) + + except GraphErrorException as ge: + self.fail("Error updating the service principal app_id {0} - {1}".format(self.app_id, str(ge))) + + def delete_resource(self, response): + try: + client = self.get_graphrbac_client(self.tenant) + client.service_principals.delete(response.get('object_id')) + self.results['changed'] = True + return True + except GraphErrorException as ge: + self.fail("Error deleting service principal app_id {0} - {1}".format(self.app_id, str(ge))) + + def get_resource(self): + try: + client = self.get_graphrbac_client(self.tenant) + result = list(client.service_principals.list(filter="servicePrincipalNames/any(c:c eq '{0}')".format(self.app_id))) + if not result: + return False + result = result[0] + return self.to_dict(result) + except GraphErrorException as ge: + self.log("Did not find the graph instance instance {0} - {1}".format(self.app_id, str(ge))) + return False + + def check_update(self, response): + app_assignment_changed = self.app_role_assignment_required is not None and \ + self.app_role_assignment_required != response.get('app_role_assignment_required', None) + + return 
app_assignment_changed + + def to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + app_display_name=object.display_name, + app_role_assignment_required=object.app_role_assignment_required + ) + + +def main(): + AzureRMADServicePrincipal() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal_info.py new file mode 100644 index 000000000..80e30b47f --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adserviceprincipal_info.py @@ -0,0 +1,144 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Haiyuan Zhang, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: azure_rm_adserviceprincipal_info + +version_added: "0.2.0" + +short_description: Get Azure Active Directory service principal info + +description: + - Get Azure Active Directory service principal info. + +options: + app_id: + description: + - The application ID. + type: str + tenant: + description: + - The tenant ID. + type: str + required: True + object_id: + description: + - It's service principal's object ID. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + haiyuan_zhang (@haiyuazhang) + Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: get ad sp info + azure_rm_adserviceprincipal_info: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + +''' + +RETURN = ''' +app_display_name: + description: + - Object's display name or its prefix. + type: str + returned: always + sample: sp +app_id: + description: + - The application ID. 
+ returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +app_role_assignment_required: + description: + - Whether the Role of the Service Principal is set. + type: bool + returned: always + sample: false +object_id: + description: + - It's service principal's object ID. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADServicePrincipalInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + app_id=dict(type='str'), + object_id=dict(type='str'), + tenant=dict(type='str', required=True), + ) + + self.tenant = None + self.app_id = None + self.object_id = None + self.results = dict(changed=False) + + super(AzureRMADServicePrincipalInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + service_principals = [] + + try: + client = self.get_graphrbac_client(self.tenant) + if self.object_id is None: + service_principals = list(client.service_principals.list(filter="servicePrincipalNames/any(c:c eq '{0}')".format(self.app_id))) + else: + service_principals = [client.service_principals.get(self.object_id)] + + self.results['service_principals'] = [self.to_dict(sp) for sp in service_principals] + except GraphErrorException as ge: + self.fail("failed to get service principal info {0}".format(str(ge))) + + return self.results + + def to_dict(self, object): + return dict( + app_id=object.app_id, + object_id=object.object_id, + app_display_name=object.display_name, + 
app_role_assignment_required=object.app_role_assignment_required + ) + + +def main(): + AzureRMADServicePrincipalInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py new file mode 100644 index 000000000..2bfa04aac --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py @@ -0,0 +1,413 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Cole Neubauer, (@coleneubauer) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: azure_rm_aduser + +version_added: "1.5.0" + +short_description: Modify an Azure Active Directory user + +description: + - Create, delete, and update an Azure Active Directory user. + +options: + tenant: + description: + - The tenant ID. + type: str + required: True + state: + description: + - State of the ad user. Use C(present) to create or update an ad user and C(absent) to delete an ad user. + type: str + default: present + choices: + - absent + - present + object_id: + description: + - The object id for the user. + - Updates or deletes the user who has this object ID. + - Mutually exclusive with I(user_principal_name), I(attribute_name), and I(odata_filter). + type: str + account_enabled: + description: + - A boolean determing whether or not the user account is enabled. + - Used when either creating or updating a user account. + type: bool + display_name: + description: + - The display name of the user. + - Used when either creating or updating a user account. + type: str + given_name: + description: + - The given name for the user. + - Used when either creating or updating a user account. + type: str + surname: + description: + - The surname for the user. 
+ - Used when either creating or updating a user account. + type: str + immutable_id: + description: + - The immutable_id of the user. + - Used when either creating or updating a user account. + type: str + mail: + description: + - The primary email address of the user. + - Used when either creating or updating a user account. + type: str + mail_nickname: + description: + - The mail alias for the user. + - Used when either creating or updating a user account. + type: str + password_profile: + description: + - The password for the user. + - Used when either creating or updating a user account. + type: str + usage_location: + description: + - A two letter country code, ISO standard 3166. + - Required for a user that will be assigned licenses due to legal requirement to check for availability of services in countries. + - Used when either creating or updating a user account. + type: str + user_type: + description: + - A string value that can be used to classify user types in your directory, such as Member and Guest. + - Used when either creating or updating a user account. + type: str + user_principal_name: + description: + - The principal name of the user. + - Creates, updates, or deletes the user who has this principal name. + - Mutually exclusive with I(object_id), I(attribute_name), and I(odata_filter). + type: str + attribute_name: + description: + - The name of an attribute that you want to match to I(attribute_value). + - If I(attribute_name) is not a collection type it will update or delete the user where I(attribute_name) is equal to I(attribute_value). + - If I(attribute_name) is a collection type it will update or delete the user where I(attribute_value) is in I(attribute_name). + - Mutually exclusive with I(object_id), I(user_principal_name), and I(odata_filter). + - Required together with I(attribute_value). + type: str + attribute_value: + description: + - The value to match I(attribute_name) to. 
+ - If I(attribute_name) is not a collection type it will update or delete the user where I(attribute_name) is equal to I(attribute_value). + - If I(attribute_name) is a collection type it will update or delete the user where I(attribute_value) is in I(attribute_name). + - Required together with I(attribute_name). + type: str + odata_filter: + description: + - Filter that can be used to specify a user to update or delete. + - Mutually exclusive with I(object_id), I(attribute_name), and I(user_principal_name). + type: str +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Cole Neubauer(@coleneubauer) + +''' + +EXAMPLES = ''' +- name: Create user + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "present" + account_enabled: "True" + display_name: "Test_{{ user_principal_name }}_Display_Name" + password_profile: "password" + mail_nickname: "Test_{{ user_principal_name }}_mail_nickname" + immutable_id: "{{ object_id }}" + given_name: "First" + surname: "Last" + user_type: "Member" + usage_location: "US" + mail: "{{ user_principal_name }}@contoso.com" + +- name: Update user with new value for account_enabled + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "present" + account_enabled: "False" + +- name: Delete user + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "absent" + +''' + +RETURN = ''' +object_id: + description: + - The object_id for the user. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +display_name: + description: + - The display name of the user. + returned: always + type: str + sample: John Smith +user_principal_name: + description: + - The principal name of the user. + returned: always + type: str + sample: jsmith@contoso.com +mail_nickname: + description: + - The mail alias for the user. 
+ returned: always + type: str + sample: jsmith +mail: + description: + - The primary email address of the user. + returned: always + type: str + sample: John.Smith@contoso.com +account_enabled: + description: + - Whether the account is enabled. + returned: always + type: bool + sample: False +user_type: + description: + - A string value that can be used to classify user types in your directory. + returned: always + type: str + sample: Member +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import UserUpdateParameters + from azure.graphrbac.models import UserCreateParameters + from azure.graphrbac.models import PasswordProfile + from azure.graphrbac.models import GraphErrorException +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADUser(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + user_principal_name=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + object_id=dict(type='str'), + attribute_name=dict(type='str'), + attribute_value=dict(type='str'), + odata_filter=dict(type='str'), + account_enabled=dict(type='bool'), + display_name=dict(type='str'), + password_profile=dict(type='str', no_log=True), + mail_nickname=dict(type='str'), + immutable_id=dict(type='str'), + usage_location=dict(type='str'), + given_name=dict(type='str'), + surname=dict(type='str'), + user_type=dict(type='str'), + mail=dict(type='str'), + tenant=dict(type='str', required=True), + ) + + self.tenant = None + self.user_principal_name = None + self.state = None + self.object_id = None + self.attribute_name = None + self.attribute_value = None + self.odata_filter = None + self.account_enabled = None + self.display_name = None + self.password_profile = None + self.mail_nickname = None + self.immutable_id = None + 
self.usage_location = None + self.given_name = None + self.surname = None + self.user_type = None + self.mail = None + self.log_path = None + self.log_mode = None + + self.results = dict(changed=False) + + mutually_exclusive = [['odata_filter', 'attribute_name', 'object_id', 'user_principal_name']] + required_together = [['attribute_name', 'attribute_value']] + required_one_of = [['odata_filter', 'attribute_name', 'object_id', 'user_principal_name']] + + super(AzureRMADUser, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + try: + client = self.get_graphrbac_client(self.tenant) + + ad_user = self.get_exisiting_user(client) + + if self.state == 'present': + + if ad_user: # Update, changed + + password = None + + if self.password_profile: + password = PasswordProfile(password=self.password_profile) + + should_update = False + + if self.immutable_id and ad_user.immutable_id != self.immutable_id: + should_update = True + if should_update or self.usage_location and ad_user.usage_location != self.usage_location: + should_update = True + if should_update or self.given_name and ad_user.given_name != self.given_name: + should_update = True + if should_update or self.surname and ad_user.surname != self.surname: + should_update = True + if should_update or self.user_type and ad_user.user_type != self.user_type: + should_update = True + if should_update or self.account_enabled is not None and ad_user.account_enabled != self.account_enabled: + should_update = True + if should_update or self.display_name and ad_user.display_name != self.display_name: + should_update = True + if should_update or password: + should_update = True + if should_update or 
self.user_principal_name and ad_user.user_principal_name != self.user_principal_name: + should_update = True + if should_update or self.mail_nickname and ad_user.mail_nickname != self.mail_nickname: + should_update = True + + if should_update: + parameters = UserUpdateParameters(immutable_id=self.immutable_id, + usage_location=self.usage_location, + given_name=self.given_name, + surname=self.surname, + user_type=self.user_type, + account_enabled=self.account_enabled, + display_name=self.display_name, + password_profile=password, + user_principal_name=self.user_principal_name, + mail_nickname=self.mail_nickname) + + client.users.update(upn_or_object_id=ad_user.object_id, parameters=parameters) + + self.results['changed'] = True + + # Get the updated versions of the users to return + # the update method, has no return value so it needs to be explicitely returned in a call + ad_user = self.get_exisiting_user(client) + + else: + self.results['changed'] = False + + else: # Create, changed + password = PasswordProfile(password=self.password_profile) + parameters = UserCreateParameters(account_enabled=self.account_enabled, + display_name=self.display_name, + password_profile=password, + user_principal_name=self.user_principal_name, + mail_nickname=self.mail_nickname, + immutable_id=self.immutable_id, + usage_location=self.usage_location, + given_name=self.given_name, + surname=self.surname, + user_type=self.user_type, + mail=self.mail) + ad_user = client.users.create(parameters=parameters) + self.results['changed'] = True + + self.results['ad_user'] = self.to_dict(ad_user) + + elif self.state == 'absent': + if ad_user: # Delete, changed + client.users.delete(ad_user.object_id) + self.results['changed'] = True + else: # Do nothing unchanged + self.results['changed'] = False + + except GraphErrorException as e: + self.fail("failed to get ad user info {0}".format(str(e))) + + return self.results + + def get_exisiting_user(self, client): + ad_user = None + + try: + if 
self.user_principal_name is not None: + ad_user = client.users.get(self.user_principal_name) + elif self.object_id is not None: + ad_user = client.users.get(self.object_id) + elif self.attribute_name is not None and self.attribute_value is not None: + try: + ad_user = list(client.users.list(filter="{0} eq '{1}'".format(self.attribute_name, self.attribute_value)))[0] + except GraphErrorException as e: + # the type doesn't get more specific. Could check the error message but no guarantees that message doesn't change in the future + # more stable to try again assuming the first error came from the attribute being a list + try: + ad_user = list(client.users.list(filter="{0}/any(c:c eq '{1}')".format(self.attribute_name, self.attribute_value)))[0] + except GraphErrorException as sub_e: + raise + elif self.odata_filter is not None: # run a filter based on user input to return based on any given attribute/query + ad_user = list(client.users.list(filter=self.odata_filter))[0] + except GraphErrorException as e: + # User was not found + err_msg = str(e) + if err_msg == "Resource '{0}' does not exist or one of its queried reference-property objects are not present.".format(self.user_principal_name): + ad_user = None + else: + raise + return ad_user + + def to_dict(self, object): + return dict( + object_id=object.object_id, + display_name=object.display_name, + user_principal_name=object.user_principal_name, + mail_nickname=object.mail_nickname, + mail=object.mail, + account_enabled=object.account_enabled, + user_type=object.user_type + ) + + +def main(): + AzureRMADUser() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py new file mode 100644 index 000000000..36d27d547 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +# +# 
Copyright (c) 2020 Cole Neubauer, (@coleneubauer) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +module: azure_rm_aduser_info + +version_added: "1.4.0" + +short_description: Get Azure Active Directory user info + +description: + - Get Azure Active Directory user info. + +options: + tenant: + description: + - The tenant ID. + type: str + required: True + object_id: + description: + - The object id for the user. + - returns the user who has this object ID. + - Mutually exclusive with I(user_principal_name), I(attribute_name), I(odata_filter) and I(all). + type: str + user_principal_name: + description: + - The principal name of the user. + - returns the user who has this principal name. + - Mutually exclusive with I(object_id), I(attribute_name), I(odata_filter) and I(all). + type: str + attribute_name: + description: + - The name of an attribute that you want to match to attribute_value. + - If I(attribute_name) is not a collection type it will return users where I(attribute_name) is equal to I(attribute_value). + - If I(attribute_name) is a collection type it will return users where I(attribute_value) is in I(attribute_name). + - Mutually exclusive with I(object_id), I(user_principal_name), I(odata_filter) and I(all). + - Required together with I(attribute_value). + type: str + attribute_value: + description: + - The value to match attribute_name to. + - If I(attribute_name) is not a collection type it will return users where I(attribute_name) is equal to I(attribute_value). + - If I(attribute_name) is a collection type it will return users where I(attribute_value) is in I(attribute_name). + - Required together with I(attribute_name). + type: str + odata_filter: + description: + - Returns users based on the the OData filter passed into this parameter. 
+ - Mutually exclusive with I(object_id), I(attribute_name), I(user_principal_name) and I(all). + type: str + all: + description: + - If C(True), will return all users in tenant. + - If C(False) will return no users. + - It is recommended that you instead identify a subset of users and use filter. + - Mutually exclusive with I(object_id), I(attribute_name), I(odata_filter) and I(user_principal_name). + type: bool +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Cole Neubauer(@coleneubauer) + +''' + +EXAMPLES = ''' + - name: Using user_principal_name + azure.azcollection.azure_rm_aduser_info: + user_principal_name: user@contoso.com + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Using object_id + azure.azcollection.azure_rm_aduser_info: + object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Using attribute mailNickname - not a collection + azure.azcollection.azure_rm_aduser_info: + attribute_name: mailNickname + attribute_value: users_mailNickname + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Using attribute proxyAddresses - a collection + azure.azcollection.azure_rm_aduser_info: + attribute_name: proxyAddresses + attribute_value: SMTP:user@contoso.com + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Using Filter mailNickname + azure.azcollection.azure_rm_aduser_info: + odata_filter: mailNickname eq 'user@contoso.com' + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Using Filter proxyAddresses + azure.azcollection.azure_rm_aduser_info: + odata_filter: proxyAddresses/any(c:c eq 'SMTP:user@contoso.com') + tenant: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +RETURN = ''' +object_id: + description: + - The object_id for the user. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +display_name: + description: + - The display name of the user. 
+ returned: always + type: str + sample: John Smith +user_principal_name: + description: + - The principal name of the user. + returned: always + type: str + sample: jsmith@contoso.com +mail_nickname: + description: + - The mail alias for the user. + returned: always + type: str + sample: jsmith +mail: + description: + - The primary email address of the user. + returned: always + type: str + sample: John.Smith@contoso.com +account_enabled: + description: + - Whether the account is enabled. + returned: always + type: bool + sample: False +user_type: + description: + - A string value that can be used to classify user types in your directory. + returned: always + type: str + sample: Member +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.graphrbac.models import GraphErrorException +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMADUserInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + user_principal_name=dict(type='str'), + object_id=dict(type='str'), + attribute_name=dict(type='str'), + attribute_value=dict(type='str'), + odata_filter=dict(type='str'), + all=dict(type='bool'), + tenant=dict(type='str', required=True), + ) + + self.tenant = None + self.user_principal_name = None + self.object_id = None + self.attribute_name = None + self.attribute_value = None + self.odata_filter = None + self.all = None + self.log_path = None + self.log_mode = None + + self.results = dict(changed=False) + + mutually_exclusive = [['odata_filter', 'attribute_name', 'object_id', 'user_principal_name', 'all']] + required_together = [['attribute_name', 'attribute_value']] + required_one_of = [['odata_filter', 'attribute_name', 'object_id', 'user_principal_name', 'all']] + + super(AzureRMADUserInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + 
supports_tags=False, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_one_of=required_one_of, + is_ad_resource=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + ad_users = [] + + try: + client = self.get_graphrbac_client(self.tenant) + + if self.user_principal_name is not None: + ad_users = [client.users.get(self.user_principal_name)] + elif self.object_id is not None: + ad_users = [client.users.get(self.object_id)] + elif self.attribute_name is not None and self.attribute_value is not None: + try: + ad_users = list(client.users.list(filter="{0} eq '{1}'".format(self.attribute_name, self.attribute_value))) + except GraphErrorException as e: + # the type doesn't get more specific. Could check the error message but no guarantees that message doesn't change in the future + # more stable to try again assuming the first error came from the attribute being a list + try: + ad_users = list(client.users.list(filter="{0}/any(c:c eq '{1}')".format(self.attribute_name, self.attribute_value))) + except GraphErrorException as sub_e: + raise + elif self.odata_filter is not None: # run a filter based on user input to return based on any given attribute/query + ad_users = list(client.users.list(filter=self.odata_filter)) + elif self.all: + ad_users = list(client.users.list()) + + self.results['ad_users'] = [self.to_dict(user) for user in ad_users] + + except GraphErrorException as e: + self.fail("failed to get ad user info {0}".format(str(e))) + + return self.results + + def to_dict(self, object): + return dict( + object_id=object.object_id, + display_name=object.display_name, + user_principal_name=object.user_principal_name, + mail_nickname=object.mail_nickname, + mail=object.mail, + account_enabled=object.account_enabled, + user_type=object.user_type + ) + + +def main(): + AzureRMADUserInfo() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py new file mode 100644 index 000000000..56e4fae2d --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py @@ -0,0 +1,1173 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Sertac Ozercan, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_aks +version_added: "0.1.2" +short_description: Manage a managed Azure Container Service (AKS) instance +description: + - Create, update and delete a managed Azure Container Service (AKS) instance. + +options: + resource_group: + description: + - Name of a resource group where the managed Azure Container Services (AKS) exists or will be created. + required: true + name: + description: + - Name of the managed Azure Container Services (AKS) instance. + required: true + state: + description: + - Assert the state of the AKS. Use C(present) to create or update an AKS and C(absent) to delete it. + default: present + choices: + - absent + - present + location: + description: + - Valid azure location. Defaults to location of the resource group. + dns_prefix: + description: + - DNS prefix specified when creating the managed cluster. + kubernetes_version: + description: + - Version of Kubernetes specified when creating the managed cluster. + linux_profile: + description: + - The Linux profile suboptions. + - Optional, provide if you need an ssh access to the cluster nodes. + suboptions: + admin_username: + description: + - The Admin Username for the cluster. + required: true + ssh_key: + description: + - The Public SSH Key used to access the cluster. + required: true + agent_pool_profiles: + description: + - The agent pool profile suboptions. 
+ suboptions: + name: + description: + - Unique name of the agent pool profile in the context of the subscription and resource group. + required: true + count: + description: + - Number of agents (VMs) to host docker containers. + - Allowed values must be in the range of C(1) to C(100) (inclusive). + required: true + vm_size: + description: + - The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)). + required: true + os_disk_size_gb: + description: + - Size of the OS disk. + enable_auto_scaling: + description: + - To enable auto-scaling. + type: bool + max_count: + description: + - Maximum number of nodes for auto-scaling. + - Required if I(enable_auto_scaling=True). + type: int + min_count: + description: + - Minmum number of nodes for auto-scaling. + - Required if I(enable_auto_scaling=True). + type: int + max_pods: + description: + - Maximum number of pods schedulable on nodes. + type: int + type: + description: + - AgentPoolType represents types of an agent pool. + - Possible values include C(VirtualMachineScaleSets) and C(AvailabilitySet). + choices: + - 'VirtualMachineScaleSets' + - 'AvailabilitySet' + type: str + mode: + description: + - AgentPoolMode represents mode of an agent pool. + - Possible values include C(System) and C(User). + - System AgentPoolMode requires a minimum VM SKU of at least 2 vCPUs and 4GB memory. + choices: + - 'System' + - 'User' + type: str + orchestrator_version: + description: + - Version of kubernetes running on the node pool. + type: str + node_labels: + description: + - Agent pool node labels to be persisted across all nodes in agent pool. + type: dict + vnet_subnet_id: + description: + - Specifies the VNet's subnet identifier. + type: str + availability_zones: + description: + - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + type: list + elements: int + choices: + - 1 + - 2 + - 3 + service_principal: + description: + - The service principal suboptions. 
If not provided - use system-assigned managed identity. + suboptions: + client_id: + description: + - The ID for the Service Principal. + required: true + client_secret: + description: + - The secret password associated with the service principal. + enable_rbac: + description: + - Enable RBAC. + - Existing non-RBAC enabled AKS clusters cannot currently be updated for RBAC use. + type: bool + default: no + network_profile: + description: + - Profile of network configuration. + suboptions: + network_plugin: + description: + - Network plugin used for building Kubernetes network. + - This property cannot been changed. + - With C(kubenet), nodes get an IP address from the Azure virtual network subnet. + - AKS features such as Virtual Nodes or network policies aren't supported with C(kubenet). + - C(azure) enables Azure Container Networking Interface(CNI), every pod gets an IP address from the subnet and can be accessed directly. + default: kubenet + choices: + - azure + - kubenet + network_policy: + description: Network policy used for building Kubernetes network. + choices: + - azure + - calico + pod_cidr: + description: + - A CIDR notation IP range from which to assign pod IPs when I(network_plugin=kubenet) is used. + - It should be a large address space that isn't in use elsewhere in your network environment. + - This address range must be large enough to accommodate the number of nodes that you expect to scale up to. + default: "10.244.0.0/16" + service_cidr: + description: + - A CIDR notation IP range from which to assign service cluster IPs. + - It must not overlap with any Subnet IP ranges. + - It should be the *.10 address of your service IP address range. + default: "10.0.0.0/16" + dns_service_ip: + description: + - An IP address assigned to the Kubernetes DNS service. + - It must be within the Kubernetes service address range specified in serviceCidr. 
+ default: "10.0.0.10" + docker_bridge_cidr: + description: + - A CIDR notation IP range assigned to the Docker bridge network. + - It must not overlap with any Subnet IP ranges or the Kubernetes service address range. + default: "172.17.0.1/16" + load_balancer_sku: + description: + - The load balancer sku for the managed cluster. + choices: + - standard + - basic + outbound_type: + description: + - How outbound traffic will be configured for a cluster. + type: str + choices: + - loadBalancer + - userDefinedRouting + api_server_access_profile: + description: + - Profile of API Access configuration. + suboptions: + authorized_ip_ranges: + description: + - Authorized IP Ranges to kubernetes API server. + - Cannot be enabled when using private cluster + type: list + elements: str + enable_private_cluster: + description: + - Whether to create the cluster as a private cluster or not. + - Cannot be changed for an existing cluster. + type: bool + aad_profile: + description: + - Profile of Azure Active Directory configuration. + suboptions: + client_app_id: + description: The client AAD application ID. + server_app_id: + description: The server AAD application ID. + server_app_secret: + description: The server AAD application secret. + tenant_id: + description: + - The AAD tenant ID to use for authentication. + - If not specified, will use the tenant of the deployment subscription. + managed: + description: + - Whether to enable manged AAD. + type: bool + default: false + admin_group_object_ids: + description: + - AAD group object IDs that will have admin role of the cluster. + type: list + elements: str + addon: + description: + - Profile of managed cluster add-on. + - Key can be C(http_application_routing), C(monitoring), C(virtual_node). + - Value must be a dict contains a bool variable C(enabled). 
+ type: dict + suboptions: + http_application_routing: + description: + - The HTTP application routing solution makes it easy to access applications that are deployed to your cluster. + type: dict + suboptions: + enabled: + description: + - Whether the solution enabled. + type: bool + monitoring: + description: + - It gives you performance visibility by collecting memory and processor metrics from controllers, nodes, + and containers that are available in Kubernetes through the Metrics API. + type: dict + suboptions: + enabled: + description: + - Whether the solution enabled. + type: bool + log_analytics_workspace_resource_id: + description: + - Where to store the container metrics. + required: true + virtual_node: + description: + - With virtual nodes, you have quick provisioning of pods, and only pay per second for their execution time. + - You don't need to wait for Kubernetes cluster autoscaler to deploy VM compute nodes to run the additional pods. + type: dict + suboptions: + enabled: + description: + - Whether the solution enabled. + type: bool + subnet_resource_id: + description: + - Subnet associated to the cluster. + required: true + node_resource_group: + description: + - Name of the resource group containing agent pool nodes. + - Unable to update. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Sertac Ozercan (@sozercan) + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' + - name: Create an AKS instance With A System Node Pool & A User Node Pool + azure_rm_aks: + name: myAKS + resource_group: myResourceGroup + location: eastus + dns_prefix: akstest + kubernetes_version: 1.14.6 + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA... + service_principal: + client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948" + client_secret: "Password1234!" 
+ agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + enable_auto_scaling: True + type: VirtualMachineScaleSets + mode: System + max_count: 3 + min_count: 1 + enable_rbac: yes + - name: user + count: 1 + vm_size: Standard_D2_v2 + enable_auto_scaling: True + type: VirtualMachineScaleSets + mode: User + max_count: 3 + min_count: 1 + enable_rbac: yes + + - name: Create a managed Azure Container Services (AKS) instance + azure_rm_aks: + name: myAKS + location: eastus + resource_group: myResourceGroup + dns_prefix: akstest + kubernetes_version: 1.14.6 + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA... + service_principal: + client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948" + client_secret: "Password123!" + agent_pool_profiles: + - name: default + count: 5 + mode: System + vm_size: Standard_B2s + tags: + Environment: Production + + - name: Use minimal parameters and system-assigned identity + azure_rm_aks: + name: myMinimalCluster + location: eastus + resource_group: myExistingResourceGroup + dns_prefix: akstest + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_D2_v2 + + - name: Create AKS with userDefinedRouting "Link:https://docs.microsoft.com/en-us/azure/aks/limit-egress-traffic#add-a-dnat-rule-to-azure-firewall" + azure_rm_aks: + name: "minimal{{ rpfx }}" + location: eastus + resource_group: "{{ resource_group }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + dns_prefix: "aks{{ rpfx }}" + service_principal: + client_id: "{{ client_id }}" + client_secret: "{{ client_secret }}" + network_profile: + network_plugin: azure + load_balancer_sku: standard + outbound_type: userDefinedRouting + service_cidr: "10.41.0.0/16" + dns_service_ip: "10.41.0.10" + docker_bridge_cidr: "172.17.0.1/16" + api_server_access_profile: + authorized_ip_ranges: + - "20.106.246.252/32" + enable_private_cluster: no + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + 
mode: System + vnet_subnet_id: "{{ output.subnets[0].id }}" + type: VirtualMachineScaleSets + enable_auto_scaling: false + + - name: Remove a managed Azure Container Services (AKS) instance + azure_rm_aks: + name: myAKS + resource_group: myResourceGroup + state: absent +''' +RETURN = ''' +state: + description: Current state of the Azure Container Service (AKS). + returned: always + type: dict + example: + agent_pool_profiles: + - count: 1 + dns_prefix: Null + name: default + os_disk_size_gb: Null + os_type: Linux + moode: System + node_labels: { "environment": "dev", "release": "stable" } + ports: Null + storage_profile: ManagedDisks + vm_size: Standard_B2s + vnet_subnet_id: Null + changed: false + dns_prefix: aks9860bdcd89 + id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.ContainerService/managedClusters/aks9860bdc" + kube_config: "......" + kubernetes_version: 1.14.6 + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADA..... 
+ location: eastus + name: aks9860bdc + provisioning_state: Succeeded + service_principal_profile: + client_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + tags: {} + type: Microsoft.ContainerService/ManagedClusters +''' +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +def create_aks_dict(aks): + ''' + Helper method to deserialize a ContainerService to a dict + :param: aks: ContainerService or AzureOperationPoller with the Azure callback object + :return: dict with the state on Azure + ''' + + return dict( + id=aks.id, + name=aks.name, + location=aks.location, + dns_prefix=aks.dns_prefix, + kubernetes_version=aks.kubernetes_version, + tags=aks.tags, + linux_profile=create_linux_profile_dict(aks.linux_profile), + service_principal_profile=create_service_principal_profile_dict( + aks.service_principal_profile), + provisioning_state=aks.provisioning_state, + agent_pool_profiles=create_agent_pool_profiles_dict( + aks.agent_pool_profiles), + type=aks.type, + kube_config=aks.kube_config, + enable_rbac=aks.enable_rbac, + network_profile=create_network_profiles_dict(aks.network_profile), + aad_profile=create_aad_profiles_dict(aks.aad_profile), + api_server_access_profile=create_api_server_access_profile_dict(aks.api_server_access_profile), + addon=create_addon_dict(aks.addon_profiles), + fqdn=aks.fqdn, + node_resource_group=aks.node_resource_group + ) + + +def create_network_profiles_dict(network): + return dict( + network_plugin=network.network_plugin, + network_policy=network.network_policy, + pod_cidr=network.pod_cidr, + service_cidr=network.service_cidr, + dns_service_ip=network.dns_service_ip, + docker_bridge_cidr=network.docker_bridge_cidr, + load_balancer_sku=network.load_balancer_sku, + outbound_type=network.outbound_type + ) if network else dict() + + +def 
create_aad_profiles_dict(aad): + return aad.as_dict() if aad else dict() + + +def create_api_server_access_profile_dict(api_server): + return api_server.as_dict() if api_server else dict() + + +def create_addon_dict(addon): + result = dict() + addon = addon or dict() + for key in addon.keys(): + result[key] = addon[key].config + if result[key] is None: + result[key] = {} + result[key]['enabled'] = addon[key].enabled + return result + + +def create_linux_profile_dict(linuxprofile): + ''' + Helper method to deserialize a ContainerServiceLinuxProfile to a dict + :param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object + :return: dict with the state on Azure + ''' + if linuxprofile: + return dict( + ssh_key=linuxprofile.ssh.public_keys[0].key_data, + admin_username=linuxprofile.admin_username + ) + else: + return None + + +def create_service_principal_profile_dict(serviceprincipalprofile): + ''' + Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict + Note: For security reason, the service principal secret is skipped on purpose. 
+ :param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object + :return: dict with the state on Azure + ''' + return dict( + client_id=serviceprincipalprofile.client_id + ) + + +def create_agent_pool_profiles_dict(agentpoolprofiles): + ''' + Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict + :param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object + :return: dict with the state on Azure + ''' + return [dict( + count=profile.count, + vm_size=profile.vm_size, + name=profile.name, + os_disk_size_gb=profile.os_disk_size_gb, + vnet_subnet_id=profile.vnet_subnet_id, + availability_zones=profile.availability_zones, + os_type=profile.os_type, + type=profile.type, + mode=profile.mode, + orchestrator_version=profile.orchestrator_version, + enable_auto_scaling=profile.enable_auto_scaling, + max_count=profile.max_count, + node_labels=profile.node_labels, + min_count=profile.min_count, + max_pods=profile.max_pods + ) for profile in agentpoolprofiles] if agentpoolprofiles else None + + +def create_addon_profiles_spec(): + ''' + Helper method to parse the ADDONS dictionary and generate the addon spec + ''' + spec = dict() + for key in ADDONS.keys(): + values = ADDONS[key] + addon_spec = dict( + enabled=dict(type='bool', default=True) + ) + configs = values.get('config') or {} + for item in configs.keys(): + addon_spec[item] = dict(type='str', aliases=[configs[item]], required=True) + spec[key] = dict(type='dict', options=addon_spec, aliases=[values['name']]) + return spec + + +ADDONS = { + 'http_application_routing': dict(name='httpApplicationRouting'), + 'monitoring': dict(name='omsagent', config={'log_analytics_workspace_resource_id': 'logAnalyticsWorkspaceResourceID'}), + 'virtual_node': dict(name='aciConnector', config={'subnet_resource_id': 'SubnetName'}) +} + + +linux_profile_spec = dict( + admin_username=dict(type='str', required=True), + ssh_key=dict(type='str', 
no_log=True, required=True) +) + + +service_principal_spec = dict( + client_id=dict(type='str', required=True), + client_secret=dict(type='str', no_log=True) +) + + +agent_pool_profile_spec = dict( + name=dict(type='str', required=True), + count=dict(type='int', required=True), + vm_size=dict(type='str', required=True), + os_disk_size_gb=dict(type='int'), + dns_prefix=dict(type='str'), + ports=dict(type='list', elements='int'), + storage_profiles=dict(type='str', choices=[ + 'StorageAccount', 'ManagedDisks']), + vnet_subnet_id=dict(type='str'), + availability_zones=dict(type='list', elements='int', choices=[1, 2, 3]), + os_type=dict(type='str', choices=['Linux', 'Windows']), + orchestrator_version=dict(type='str', required=False), + type=dict(type='str', choices=['VirtualMachineScaleSets', 'AvailabilitySet']), + mode=dict(type='str', choices=['System', 'User']), + enable_auto_scaling=dict(type='bool'), + max_count=dict(type='int'), + node_labels=dict(type='dict'), + min_count=dict(type='int'), + max_pods=dict(type='int') +) + + +network_profile_spec = dict( + network_plugin=dict(type='str', choices=['azure', 'kubenet']), + network_policy=dict(type='str'), + pod_cidr=dict(type='str'), + service_cidr=dict(type='str'), + dns_service_ip=dict(type='str'), + docker_bridge_cidr=dict(type='str'), + load_balancer_sku=dict(type='str'), + outbound_type=dict(type='str', default='loadBalancer', choices=['userDefinedRouting', 'loadBalancer']) +) + + +aad_profile_spec = dict( + client_app_id=dict(type='str'), + server_app_id=dict(type='str'), + server_app_secret=dict(type='str', no_log=True), + tenant_id=dict(type='str'), + managed=dict(type='bool', default=False), + admin_group_object_ids=dict(type='list', elements='str') +) + + +api_server_access_profile_spec = dict( + authorized_ip_ranges=dict(type='list', elements='str'), + enable_private_cluster=dict(type='bool'), +) + + +class AzureRMManagedCluster(AzureRMModuleBase): + """Configuration class for an Azure RM container 
service (AKS) resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + dns_prefix=dict( + type='str' + ), + kubernetes_version=dict( + type='str' + ), + linux_profile=dict( + type='dict', + options=linux_profile_spec + ), + agent_pool_profiles=dict( + type='list', + elements='dict', + options=agent_pool_profile_spec + ), + service_principal=dict( + type='dict', + options=service_principal_spec + ), + enable_rbac=dict( + type='bool', + default=False + ), + network_profile=dict( + type='dict', + options=network_profile_spec + ), + aad_profile=dict( + type='dict', + options=aad_profile_spec + ), + addon=dict( + type='dict', + options=create_addon_profiles_spec() + ), + api_server_access_profile=dict( + type='dict', + options=api_server_access_profile_spec + ), + node_resource_group=dict( + type='str' + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.dns_prefix = None + self.kubernetes_version = None + self.tags = None + self.state = None + self.linux_profile = None + self.agent_pool_profiles = None + self.service_principal = None + self.enable_rbac = False + self.network_profile = None + self.aad_profile = None + self.api_server_access_profile = None + self.addon = None + self.node_resource_group = None + + required_if = [ + ('state', 'present', [ + 'dns_prefix', 'agent_pool_profiles']) + ] + + self.results = dict(changed=False) + + super(AzureRMManagedCluster, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = None + to_be_updated 
= False + update_tags = False + update_agentpool = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + response = self.get_aks() + + # Check if the AKS instance already present in the RG + if self.state == 'present': + available_versions = self.get_all_versions() + if not response: + to_be_updated = True + if self.kubernetes_version not in available_versions.keys(): + self.fail("Unsupported kubernetes version. Expected one of {0} but got {1}".format(available_versions.keys(), self.kubernetes_version)) + else: + self.results = response + self.results['changed'] = False + self.log('Results : {0}'.format(response)) + update_tags, response['tags'] = self.update_tags(response['tags']) + + if response['provisioning_state'] == "Succeeded": + + def is_property_changed(profile, property, ignore_case=False): + base = response[profile].get(property) + new = getattr(self, profile).get(property) + if ignore_case: + return base.lower() != new.lower() + else: + return base != new + + # Cannot Update the SSH Key for now // Let service to handle it + if self.linux_profile and is_property_changed('linux_profile', 'ssh_key'): + self.log(("Linux Profile Diff SSH, Was {0} / Now {1}" + .format(response['linux_profile']['ssh_key'], self.linux_profile.get('ssh_key')))) + to_be_updated = True + # self.module.warn("linux_profile.ssh_key cannot be updated") + + # self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username'))) + # self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username'))) + # Cannot Update the Username for now // Let service to handle it + if self.linux_profile and is_property_changed('linux_profile', 'admin_username'): + self.log(("Linux Profile Diff User, Was {0} / Now {1}" + .format(response['linux_profile']['admin_username'], self.linux_profile.get('admin_username')))) + to_be_updated = True + # 
self.module.warn("linux_profile.admin_username cannot be updated") + + # Cannot have more that one agent pool profile for now + if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles): + self.log("Agent Pool count is diff, need to update") + update_agentpool = True + + if response['kubernetes_version'] != self.kubernetes_version: + upgrade_versions = available_versions.get(response['kubernetes_version']) or available_versions.keys() + if upgrade_versions and self.kubernetes_version not in upgrade_versions: + self.fail('Cannot upgrade kubernetes version to {0}, supported value are {1}'.format(self.kubernetes_version, upgrade_versions)) + to_be_updated = True + + if response['enable_rbac'] != self.enable_rbac: + to_be_updated = True + + if response['api_server_access_profile'] != self.api_server_access_profile and self.api_server_access_profile is not None: + if self.api_server_access_profile.get('enable_private_cluster') != response['api_server_access_profile'].get('enable_private_cluster'): + self.log(("Api Server Access Diff - Origin {0} / Update {1}" + .format(str(self.api_server_access_profile), str(response['api_server_access_profile'])))) + self.fail("The enable_private_cluster of the api server access profile cannot be updated") + elif self.api_server_access_profile.get('authorized_ip_ranges') is not None and \ + len(self.api_server_access_profile.get('authorized_ip_ranges')) != \ + len(response['api_server_access_profile'].get('authorized_ip_ranges', [])): + self.log(("Api Server Access Diff - Origin {0} / Update {1}" + .format(str(self.api_server_access_profile), str(response['api_server_access_profile'])))) + to_be_updated = True + + if self.network_profile: + for key in self.network_profile.keys(): + original = response['network_profile'].get(key) or '' + if self.network_profile[key] and self.network_profile[key].lower() != original.lower(): + to_be_updated = True + + def compare_addon(origin, patch, config): + if not patch: + return True 
+ if not origin: + return False + if origin['enabled'] != patch['enabled']: + return False + config = config or dict() + for key in config.keys(): + if origin.get(config[key]) != patch.get(key): + return False + return True + + if self.addon: + for key in ADDONS.keys(): + addon_name = ADDONS[key]['name'] + if not compare_addon(response['addon'].get(addon_name), self.addon.get(key), ADDONS[key].get('config')): + to_be_updated = True + + for profile_result in response['agent_pool_profiles']: + matched = False + for profile_self in self.agent_pool_profiles: + if profile_result['name'] == profile_self['name']: + matched = True + os_disk_size_gb = profile_self.get('os_disk_size_gb') or profile_result['os_disk_size_gb'] + vnet_subnet_id = profile_self.get('vnet_subnet_id', profile_result['vnet_subnet_id']) + count = profile_self['count'] + orchestrator_version = profile_self['orchestrator_version'] + vm_size = profile_self['vm_size'] + availability_zones = profile_self['availability_zones'] + enable_auto_scaling = profile_self['enable_auto_scaling'] + mode = profile_self['mode'] + max_count = profile_self['max_count'] + node_labels = profile_self['node_labels'] + min_count = profile_self['min_count'] + max_pods = profile_self['max_pods'] + + if max_pods is not None and profile_result['max_pods'] != max_pods: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + self.fail("The max_pods of the agent pool cannot be updated") + elif vnet_subnet_id is not None and profile_result['vnet_subnet_id'] != vnet_subnet_id: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + self.fail("The vnet_subnet_id of the agent pool cannot be updated") + elif availability_zones is not None and \ + ' '.join(map(str, profile_result['availability_zones'])) != ' '.join(map(str, availability_zones)): + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), 
str(profile_self)))) + self.fail("The availability_zones of the agent pool cannot be updated") + + if count is not None and profile_result['count'] != count: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif vm_size is not None and profile_result['vm_size'] != vm_size: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif os_disk_size_gb is not None and profile_result['os_disk_size_gb'] != os_disk_size_gb: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif enable_auto_scaling is not None and profile_result['enable_auto_scaling'] != enable_auto_scaling: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif max_count is not None and profile_result['max_count'] != max_count: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif min_count is not None and profile_result['min_count'] != min_count: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif mode is not None and profile_result['mode'] != mode: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + elif node_labels is not None and profile_result['node_labels'] != node_labels: + self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self)))) + to_be_updated = True + if not matched: + self.log("Agent Pool not found") + to_be_updated = True + + if update_agentpool: + self.log("Need to update agentpool") + if not self.check_mode: + response_profile_name_list = [response_profile['name'] for 
response_profile in response['agent_pool_profiles']] + self_profile_name_list = [self_profile['name'] for self_profile in self.agent_pool_profiles] + to_update = list(set(self_profile_name_list) - set(response_profile_name_list)) + to_delete = list(set(response_profile_name_list) - set(self_profile_name_list)) + if len(to_delete) > 0: + self.delete_agentpool(to_delete) + for profile in self.results['agent_pool_profiles']: + if profile['name'] in to_delete: + self.results['agent_pool_profiles'].remove(profile) + if len(to_update) > 0: + self.results['agent_pool_profiles'].extend(self.create_update_agentpool(to_update)) + self.log("Creation / Update done") + self.results['changed'] = True + + if to_be_updated: + self.log("Need to Create / Update the AKS instance") + + if not self.check_mode: + self.results = self.create_update_aks() + self.log("Creation / Update done") + + self.results['changed'] = True + elif update_tags: + self.log("Need to Update the AKS tags") + + if not self.check_mode: + self.results['tags'] = self.update_aks_tags() + self.results['changed'] = True + return self.results + + elif self.state == 'absent' and response: + self.log("Need to Delete the AKS instance") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_aks() + + self.log("AKS instance deleted") + + return self.results + + def create_update_aks(self): + ''' + Creates or updates a managed Azure container service (AKS) with the specified configuration of agents. 
+ + :return: deserialized AKS instance state dictionary + ''' + self.log("Creating / Updating the AKS instance {0}".format(self.name)) + + agentpools = [] + + if self.agent_pool_profiles: + agentpools = [self.create_agent_pool_profile_instance(profile) for profile in self.agent_pool_profiles] + + if self.service_principal: + service_principal_profile = self.create_service_principal_profile_instance(self.service_principal) + identity = None + else: + service_principal_profile = None + identity = self.managedcluster_models.ManagedClusterIdentity(type='SystemAssigned') + + if self.linux_profile: + linux_profile = self.create_linux_profile_instance(self.linux_profile) + else: + linux_profile = None + + parameters = self.managedcluster_models.ManagedCluster( + location=self.location, + dns_prefix=self.dns_prefix, + kubernetes_version=self.kubernetes_version, + tags=self.tags, + service_principal_profile=service_principal_profile, + agent_pool_profiles=agentpools, + linux_profile=linux_profile, + identity=identity, + enable_rbac=self.enable_rbac, + network_profile=self.create_network_profile_instance(self.network_profile), + aad_profile=self.create_aad_profile_instance(self.aad_profile), + api_server_access_profile=self.create_api_server_access_profile_instance(self.api_server_access_profile), + addon_profiles=self.create_addon_profile_instance(self.addon), + node_resource_group=self.node_resource_group + ) + + # self.log("service_principal_profile : {0}".format(parameters.service_principal_profile)) + # self.log("linux_profile : {0}".format(parameters.linux_profile)) + # self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0])) + # self.log("ssh : {0}".format(parameters.linux_profile.ssh)) + # self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles)) + + try: + poller = self.managedcluster_client.managed_clusters.begin_create_or_update(self.resource_group, self.name, parameters) + response = self.get_poller_result(poller) + 
response.kube_config = self.get_aks_kubeconfig() + return create_aks_dict(response) + except Exception as exc: + self.log('Error attempting to create the AKS instance.') + self.fail("Error creating the AKS instance: {0}".format(exc.message)) + + def update_aks_tags(self): + try: + poller = self.managedcluster_client.managed_clusters.begin_update_tags(self.resource_group, self.name, self.tags) + response = self.get_poller_result(poller) + return response.tags + except Exception as exc: + self.fail("Error attempting to update AKS tags: {0}".format(exc.message)) + + def create_update_agentpool(self, to_update_name_list): + response_all = [] + for profile in self.agent_pool_profiles: + if (profile['name'] in to_update_name_list): + self.log("Creating / Updating the AKS agentpool {0}".format(profile['name'])) + parameters = self.managedcluster_models.AgentPool( + count=profile["count"], + vm_size=profile["vm_size"], + os_disk_size_gb=profile["os_disk_size_gb"], + max_count=profile["max_count"], + node_labels=profile["node_labels"], + min_count=profile["min_count"], + orchestrator_version=profile["orchestrator_version"], + max_pods=profile["max_pods"], + enable_auto_scaling=profile["enable_auto_scaling"], + agent_pool_type=profile["type"], + mode=profile["mode"] + ) + try: + poller = self.managedcluster_client.agent_pools.begin_create_or_update(self.resource_group, self.name, profile["name"], parameters) + response = self.get_poller_result(poller) + response_all.append(response) + except Exception as exc: + self.fail("Error attempting to update AKS agentpool: {0}".format(exc.message)) + return create_agent_pool_profiles_dict(response_all) + + def delete_agentpool(self, to_delete_name_list): + for name in to_delete_name_list: + self.log("Deleting the AKS agentpool {0}".format(name)) + try: + poller = self.managedcluster_client.agent_pools.begin_delete(self.resource_group, self.name, name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error 
attempting to update AKS agentpool: {0}".format(exc.message)) + + def delete_aks(self): + ''' + Deletes the specified managed container service (AKS) in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the AKS instance {0}".format(self.name)) + try: + poller = self.managedcluster_client.managed_clusters.begin_delete(self.resource_group, self.name) + self.get_poller_result(poller) + return True + except Exception as e: + self.log('Error attempting to delete the AKS instance.') + self.fail("Error deleting the AKS instance: {0}".format(e.message)) + return False + + def get_aks(self): + ''' + Gets the properties of the specified container service. + + :return: deserialized AKS instance state dictionary + ''' + self.log("Checking if the AKS instance {0} is present".format(self.name)) + try: + response = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name) + self.log("Response : {0}".format(response)) + self.log("AKS instance : {0} found".format(response.name)) + response.kube_config = self.get_aks_kubeconfig() + return create_aks_dict(response) + except ResourceNotFoundError: + self.log('Did not find the AKS instance.') + return False + + def get_all_versions(self): + try: + result = dict() + response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters') + orchestrators = response.orchestrators + for item in orchestrators: + result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else [] + return result + except Exception as exc: + self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc))) + + def get_aks_kubeconfig(self): + ''' + Gets kubeconfig for the specified AKS instance. 
+ + :return: AKS instance kubeconfig + ''' + access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group_name=self.resource_group, + resource_name=self.name, + role_name="clusterUser") + return access_profile.kube_config.decode('utf-8') + + def create_agent_pool_profile_instance(self, agentpoolprofile): + ''' + Helper method to serialize a dict to a ManagedClusterAgentPoolProfile + :param: agentpoolprofile: dict with the parameters to setup the ManagedClusterAgentPoolProfile + :return: ManagedClusterAgentPoolProfile + ''' + return self.managedcluster_models.ManagedClusterAgentPoolProfile(**agentpoolprofile) + + def create_service_principal_profile_instance(self, spnprofile): + ''' + Helper method to serialize a dict to a ManagedClusterServicePrincipalProfile + :param: spnprofile: dict with the parameters to setup the ManagedClusterServicePrincipalProfile + :return: ManagedClusterServicePrincipalProfile + ''' + return self.managedcluster_models.ManagedClusterServicePrincipalProfile( + client_id=spnprofile['client_id'], + secret=spnprofile['client_secret'] + ) + + def create_linux_profile_instance(self, linuxprofile): + ''' + Helper method to serialize a dict to a ContainerServiceLinuxProfile + :param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile + :return: ContainerServiceLinuxProfile + ''' + return self.managedcluster_models.ContainerServiceLinuxProfile( + admin_username=linuxprofile['admin_username'], + ssh=self.managedcluster_models.ContainerServiceSshConfiguration(public_keys=[ + self.managedcluster_models.ContainerServiceSshPublicKey(key_data=str(linuxprofile['ssh_key']))]) + ) + + def create_network_profile_instance(self, network): + return self.managedcluster_models.ContainerServiceNetworkProfile(**network) if network else None + + def create_api_server_access_profile_instance(self, server_access): + return self.managedcluster_models.ManagedClusterAPIServerAccessProfile(**server_access) if 
server_access else None + + def create_aad_profile_instance(self, aad): + return self.managedcluster_models.ManagedClusterAADProfile(**aad) if aad else None + + def create_addon_profile_instance(self, addon): + result = dict() + addon = addon or {} + for key in addon.keys(): + if not ADDONS.get(key): + self.fail('Unsupported addon {0}'.format(key)) + if addon.get(key): + name = ADDONS[key]['name'] + config_spec = ADDONS[key].get('config') or dict() + config = addon[key] + for v in config_spec.keys(): + config[config_spec[v]] = config[v] + result[name] = self.managedcluster_models.ManagedClusterAddonProfile(config=config, enabled=config['enabled']) + return result + + +def main(): + """Main execution""" + AzureRMManagedCluster() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks_info.py new file mode 100644 index 000000000..19e802833 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks_info.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_aks_info + +version_added: "0.1.2" + +short_description: Get Azure Kubernetes Service facts + +description: + - Get facts for a specific Azure Kubernetes Service or all Azure Kubernetes Services. + +options: + name: + description: + - Limit results to a specific resource group. + resource_group: + description: + - The resource group to search for the desired Azure Kubernetes Service + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + show_kubeconfig: + description: + - Show kubeconfig of the AKS cluster. 
+ - Note the operation will cost more network overhead, not recommended when listing AKS. + choices: + - user + - admin + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' + - name: Get facts for one Azure Kubernetes Service + azure_rm_aks_info: + name: Testing + resource_group: myResourceGroup + + - name: Get facts for all Azure Kubernetes Services + azure_rm_aks_info: + + - name: Get facts by tags + azure_rm_aks_info: + tags: + - testing +''' + +RETURN = ''' +azure_aks: + description: List of Azure Kubernetes Service dicts. + returned: always + type: list +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'managedClusters' + + +class AzureRMManagedClusterInfo(AzureRMModuleBase): + """Utility class to get Azure Kubernetes Service facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str'), + show_kubeconfig=dict(type='str', choices=['user', 'admin']), + ) + + self.results = dict( + changed=False, + aks=[], + available_versions=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + self.show_kubeconfig = None + + super(AzureRMManagedClusterInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_aks_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_aks_facts' module has been renamed to 'azure_rm_aks_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + self.results['aks'] = ( + self.get_item() if self.name + else self.list_items() + ) + + return 
self.results + + def get_item(self): + """Get a single Azure Kubernetes Service""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] + if self.show_kubeconfig: + result[0]['kube_config'] = self.get_aks_kubeconfig(self.resource_group, self.name) + + return result + + def list_items(self): + """Get all Azure Kubernetes Services""" + + self.log('List all Azure Kubernetes Services') + + try: + response = self.managedcluster_client.managed_clusters.list(self.resource_group) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + item_dict = self.serialize_obj(item, AZURE_OBJECT_CLASS) + if self.show_kubeconfig: + item_dict['kube_config'] = self.get_aks_kubeconfig(self.resource_group, item.name) + results.append(item_dict) + + return results + + def get_aks_kubeconfig(self, resource_group, name): + ''' + Gets kubeconfig for the specified AKS instance. 
+ + :return: AKS instance kubeconfig + ''' + if not self.show_kubeconfig: + return '' + role_name = 'cluster{0}'.format(str.capitalize(self.show_kubeconfig)) + access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group, name, role_name) + return access_profile.kube_config.decode('utf-8') + + +def main(): + """Main module execution code path""" + + AzureRMManagedClusterInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py new file mode 100644 index 000000000..b578826f6 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py @@ -0,0 +1,526 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_aksagentpool +version_added: '1.14.0' +short_description: Manage node pools in Kubernetes kubernetes cluster +description: + - Create, update or delete node pools in kubernetes cluster. + +options: + resource_group: + description: + - The name of the resource group. + type: str + required: True + cluster_name: + description: + - The name of the kubernetes cluster. + type: str + required: True + name: + description: + - The name of the node agent pool. + type: str + required: True + count: + description: + - Number of agents (VMs) to host docker containers. + type: int + vm_size: + description: + - Size of agent VMs + type: str + os_disk_size_gb: + description: + - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. + type: int + vnet_subnet_id: + description: + - VNet SubnetID specifies the VNet's subnet identifier. 
+ type: str + availability_zones: + description: + - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + type: list + elements: int + choices: + - 1 + - 2 + - 3 + os_type: + description: + - OsType to be used to specify os type. + type: str + choices: + - Linux + - Windows + orchestrator_version: + description: + - Version of orchestrator specified when creating the managed cluster. + type: str + type_properties_type: + description: + - AgentPoolType represents types of an agent pool. + type: str + choices: + - VirtualMachineScaleSets + - AvailabilitySet + mode: + description: + - AgentPoolMode represents mode of an agent pool. + type: str + choices: + - System + - User + enable_auto_scaling: + description: + - Whether to enable auto-scaler. + type: bool + max_count: + description: + - Maximum number of nodes for auto-scaling. + type: int + node_labels: + description: + - Agent pool node labels to be persisted across all nodes in agent pool. + type: dict + min_count: + description: + - Minimum number of nodes for auto-scaling. + type: int + max_pods: + description: + - Maximum number of pods that can run on a node. + type: int + state: + description: + - State of the automation runbook. Use C(present) to create or update a automation runbook and use C(absent) to delete. 
+ type: str + default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - xuzhang3 (@xuzhang3) + - Fred Sun (@Fred-sun) + +''' + +EXAMPLES = ''' +- name: Add new node agent pool + azure_rm_aksagentpool: + resource_group: "{{ resource_group }}" + cluster_name: aksfred01 + name: default-new + count: 2 + vm_size: Standard_B2s + type_properties_type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable"} + max_pods: 42 + orchestrator_version: 1.23.5 + availability_zones: + - 1 + - 2 +- name: Delete node agent pool + azure_rm_aksagentpool: + resource_group: "{{ resource_group }}" + cluster_name: aksfred01 + name: default-new +''' + +RETURN = ''' +aks_agent_pools: + description: + - Details for a node pool in the managed Kubernetes cluster. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + type: str + returned: always + sample: "/subscriptions/xxx-xxxf/resourcegroups/myRG/providers/Microsoft.ContainerService/managedClusters/cluster/agentPools/default" + resource_group: + description: + - Resource group name. + type: str + returned: always + sample: myRG + name: + description: + - Resource name. + type: str + returned: always + sample: default + cluster_name: + description: + - The cluster name. + type: str + returned: always + sample: testcluster + availability_zones: + description: + - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + type: list + returned: always + sample: [1, 2] + count: + description: + - Number of agents (VMs) to host docker containers. + type: int + returned: always + sample: 2 + enable_auto_scaling: + description: + - Whether to enable auto-scaler. + type: str + returned: always + sample: null + enable_node_public_ip: + description: + - Enable public IP for nodes. 
+ type: str + returned: always + sample: bool + max_count: + description: + - Maximum number of nodes for auto-scaling. + type: int + returned: always + sample: 10 + min_count: + description: + - Minimum number of nodes for auto-scaling. + type: int + returned: always + sample: 1 + max_pods: + description: + - Maximum number of pods that can run on a node. + type: int + returned: always + sample: 42 + mode: + description: + - AgentPoolMode represents mode of an agent pool. + type: str + returned: always + sample: System + node_image_version: + description: + - Version of node image. + type: str + returned: always + sample: AKSUbuntu-1804gen2containerd-2022.08.23 + node_labels: + description: + - Agent pool node labels to be persisted across all nodes in agent pool. + type: list + returned: always + sample: ["release": "stable"] + node_taints: + description: + - Taints added to new nodes during node pool create and scale. + type: str + returned: always + sample: null + orchestrator_version: + description: + - Version of orchestrator specified when creating the managed cluster. + type: str + returned: always + sample: 1.22.11 + os_disk_size_gb: + description: + - OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. + type: int + returned: always + sample: 128 + os_type: + description: + - OsType to be used to specify os type. + type: str + returned: always + sample: Linux + provisioning_state: + description: + - The current deployment or provisioning state, which only appears in the response. + type: str + returned: always + sample: Succeeded + scale_set_eviction_policy: + description: + - ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. + type: str + returned: always + sample: null + scale_set_priority: + description: + - caleSetPriority to be used to specify virtual machine scale set priority. 
+ type: str + returned: always + sample: null + spot_max_price: + description: + - SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. + type: float + returned: always + sample: null + type: + description: + - Resource Type. + type: str + returned: always + sample: Microsoft.ContainerService/managedClusters/agentPools + type_properties_type: + description: + - AgentPoolType represents types of an agent pool. + type: str + returned: always + sample: VirtualMachineScaleSets + upgrade_settings: + description: + - Settings for upgrading the agentpool. + type: str + returned: always + sample: null + vm_size: + description: + - Size of agent VMs. + type: str + returned: always + sample: Standard_B2s + vnet_subnet_id: + description: + - VNet SubnetID specifies the VNet's subnet identifier. + type: str + returned: always + sample: null +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + pass + + +class AzureRMAksAgentPool(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + cluster_name=dict( + type='str', + required=True + ), + count=dict( + type='int', + ), + vm_size=dict( + type='str', + ), + os_disk_size_gb=dict( + type='int' + ), + vnet_subnet_id=dict( + type='str' + ), + availability_zones=dict( + type='list', + elements='int', + choices=[1, 2, 3] + ), + os_type=dict( + type='str', + choices=['Linux', 'Windows'] + ), + orchestrator_version=dict( + type='str', + ), + type_properties_type=dict( + type='str', + choices=['VirtualMachineScaleSets', 'AvailabilitySet'] + ), + mode=dict( + type='str', + choices=['System', 'User'], + ), + enable_auto_scaling=dict( + type='bool' + ), + 
max_count=dict( + type='int' + ), + node_labels=dict( + type='dict' + ), + min_count=dict( + type='int' + ), + max_pods=dict( + type='int' + ), + state=dict( + type='str', + choices=['present', 'absent'], + default='present' + ) + ) + # store the results of the module operation + self.results = dict() + self.resource_group = None + self.name = None + self.cluster_name = None + self.count = None + self.vm_size = None + self.mode = None + self.os_disk_size_gb = None + self.storage_profiles = None + self.vnet_subnet_id = None + self.availability_zones = None + self.os_type = None + self.orchestrator_version = None + self.type_properties_type = None + self.enable_auto_scaling = None + self.max_count = None + self.node_labels = None + self.min_count = None + self.max_pods = None + self.body = dict() + + super(AzureRMAksAgentPool, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec): + setattr(self, key, kwargs[key]) + if key not in ['resource_group', 'cluster_name', 'name', 'state']: + self.body[key] = kwargs[key] + + agent_pool = self.get() + changed = False + response = None + + if self.state == 'present': + if agent_pool: + for key in self.body.keys(): + if self.body[key] is not None and self.body[key] != agent_pool[key]: + changed = True + else: + self.body[key] = agent_pool[key] + else: + changed = True + + if changed: + if not self.check_mode: + response = self.create_or_update(self.body) + + else: + if not self.check_mode: + if agent_pool: + response = self.delete_agentpool() + changed = True + else: + changed = False + else: + changed = True + + self.results['changed'] = changed + self.results['aks_agent_pools'] = response + return self.results + + def get(self): + try: + response = self.managedcluster_client.agent_pools.get(self.resource_group, self.cluster_name, self.name) + return self.to_dict(response) + except ResourceNotFoundError: + pass + + def 
create_or_update(self, parameters): + try: + response = self.managedcluster_client.agent_pools.begin_create_or_update(self.resource_group, self.cluster_name, self.name, parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + return self.to_dict(response) + except Exception as exc: + self.fail('Error when creating cluster node agent pool {0}: {1}'.format(self.name, exc)) + + def delete_agentpool(self): + try: + response = self.managedcluster_client.agent_pools.begin_delete(self.resource_group, self.cluster_name, self.name) + except Exception as exc: + self.fail('Error when deleting cluster agent pool {0}: {1}'.format(self.name, exc)) + + def to_dict(self, agent_pool): + if not agent_pool: + return None + agent_pool_dict = dict( + resource_group=self.resource_group, + cluster_name=self.cluster_name, + id=agent_pool.id, + type=agent_pool.type, + name=agent_pool.name, + count=agent_pool.count, + vm_size=agent_pool.vm_size, + os_disk_size_gb=agent_pool.os_disk_size_gb, + vnet_subnet_id=agent_pool.vnet_subnet_id, + max_pods=agent_pool.max_pods, + os_type=agent_pool.os_type, + max_count=agent_pool.max_count, + min_count=agent_pool.min_count, + enable_auto_scaling=agent_pool.enable_auto_scaling, + type_properties_type=agent_pool.type_properties_type, + mode=agent_pool.mode, + orchestrator_version=agent_pool.orchestrator_version, + node_image_version=agent_pool.node_image_version, + upgrade_settings=agent_pool.upgrade_settings, + provisioning_state=agent_pool.provisioning_state, + availability_zones=[], + enable_node_public_ip=agent_pool.enable_node_public_ip, + scale_set_priority=agent_pool.scale_set_priority, + scale_set_eviction_policy=agent_pool.scale_set_eviction_policy, + spot_max_price=agent_pool.spot_max_price, + node_labels=agent_pool.node_labels, + node_taints=agent_pool.node_taints, + ) + + if agent_pool.availability_zones is not None: + for key in agent_pool.availability_zones: + 
agent_pool_dict['availability_zones'].append(int(key)) + + return agent_pool_dict + + +def main(): + AzureRMAksAgentPool() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py new file mode 100644 index 000000000..977261da8 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_aksagentpool_info +version_added: '1.14.0' +short_description: Show the details for a node pool in the managed Kubernetes cluster +description: + - Get the details for a node pool in the managed Kubernetes cluster. + +options: + resource_group: + description: + - The name of the resource group. + type: str + required: True + cluster_name: + description: + - The cluster name. + type: str + required: True + name: + description: + - The node pool name. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred Sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: Get node agent pool by cluster name + azure_rm_aksagentpool_info: + resource_group: myRG + cluster_name: testcluster + + - name: Get node agent pool by name + azure_rm_aksagentpool_info: + resource_group: myRG + cluster_name: testcluster + name: default + +''' + +RETURN = ''' +aks_agent_pools: + description: + - Details for a node pool in the managed Kubernetes cluster. + returned: always + type: complex + contains: + id: + description: + - Resource ID. 
+ type: str + returned: always + sample: "/subscriptions/xxx-xxxf/resourcegroups/myRG/providers/Microsoft.ContainerService/managedClusters/cluster/agentPools/default" + resource_group: + description: + - Resource group name. + type: str + returned: always + sample: myRG + name: + description: + - Resource name. + type: str + returned: always + sample: default + cluster_name: + description: + - The cluster name. + type: str + returned: always + sample: testcluster + availability_zones: + description: + - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + type: list + returned: always + sample: [1, 2] + count: + description: + - Number of agents (VMs) to host docker containers. + type: int + returned: always + sample: 2 + enable_auto_scaling: + description: + - Whether to enable auto-scaler. + type: bool + returned: always + sample: null + enable_node_public_ip: + description: + - Enable public IP for nodes. + type: bool + returned: always + sample: True + max_count: + description: + - Maximum number of nodes for auto-scaling. + type: int + returned: always + sample: 10 + min_count: + description: + - Minimum number of nodes for auto-scaling. + type: int + returned: always + sample: 1 + max_pods: + description: + - Maximum number of pods that can run on a node. + type: int + returned: always + sample: 42 + mode: + description: + - AgentPoolMode represents mode of an agent pool. + type: str + returned: always + sample: System + node_image_version: + description: + - Version of node image. + type: str + returned: always + sample: AKSUbuntu-1804gen2containerd-2022.08.23 + node_labels: + description: + - Agent pool node labels to be persisted across all nodes in agent pool. + type: list + returned: always + sample: ["release":"stable"] + node_taints: + description: + - Taints added to new nodes during node pool create and scale. 
+ type: str + returned: always + sample: null + orchestrator_version: + description: + - Version of orchestrator specified when creating the managed cluster. + type: str + returned: always + sample: 1.22.11 + os_disk_size_gb: + description: + - OS Disk Size in GB to be used to specify the disk size for every machine in this master agent pool. + type: int + returned: always + sample: 128 + os_type: + description: + - OsType to be used to specify os type. + type: str + returned: always + sample: Linux + provisioning_state: + description: + - The current deployment or provisioning state, which only appears in the response. + type: str + returned: always + sample: Succeeded + scale_set_eviction_policy: + description: + - ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. + type: str + returned: always + sample: null + scale_set_priority: + description: + - caleSetPriority to be used to specify virtual machine scale set priority. + type: str + returned: always + sample: null + spot_max_price: + description: + - SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. + type: float + returned: always + sample: null + type: + description: + - Resource Type. + type: str + returned: always + sample: Microsoft.ContainerService/managedClusters/agentPools + type_properties_type: + description: + - AgentPoolType represents types of an agent pool. + type: str + returned: always + sample: VirtualMachineScaleSets + upgrade_settings: + description: + - Settings for upgrading the agentpool. + type: str + returned: always + sample: null + vm_size: + description: + - Size of agent VMs. + type: str + returned: always + sample: Standard_B2s + vnet_subnet_id: + description: + - VNet SubnetID specifies the VNet's subnet identifier. 
+ type: str + returned: always + sample: null +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + pass + + +class AzureRMAgentPoolInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + cluster_name=dict( + type='str', + required=True + ) + ) + # store the results of the module operation + self.results = dict() + self.resource_group = None + self.name = None + self.cluster_name = None + + super(AzureRMAgentPoolInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec): + setattr(self, key, kwargs[key]) + + if self.name: + aks_agent_pools = [self.get_agentpool()] + else: + aks_agent_pools = self.list_agentpool() + self.results['aks_agent_pools'] = [self.to_dict(x) for x in aks_agent_pools] + return self.results + + def get_agentpool(self): + try: + return self.managedcluster_client.agent_pools.get(self.resource_group, self.cluster_name, self.name) + except ResourceNotFoundError: + pass + + def list_agentpool(self): + result = [] + try: + resp = self.managedcluster_client.agent_pools.list(self.resource_group, self.cluster_name) + while True: + result.append(resp.next()) + except StopIteration: + pass + except Exception: + pass + return result + + def to_dict(self, agent_pool): + if not agent_pool: + return None + agent_pool_dict = dict( + resource_group=self.resource_group, + cluster_name=self.cluster_name, + id=agent_pool.id, + type=agent_pool.type, + name=agent_pool.name, + count=agent_pool.count, + vm_size=agent_pool.vm_size, + os_disk_size_gb=agent_pool.os_disk_size_gb, + vnet_subnet_id=agent_pool.vnet_subnet_id, + 
max_pods=agent_pool.max_pods, + os_type=agent_pool.os_type, + max_count=agent_pool.max_count, + min_count=agent_pool.min_count, + enable_auto_scaling=agent_pool.enable_auto_scaling, + type_properties_type=agent_pool.type_properties_type, + mode=agent_pool.mode, + availability_zones=[], + orchestrator_version=agent_pool.orchestrator_version, + node_image_version=agent_pool.node_image_version, + upgrade_settings=agent_pool.upgrade_settings, + provisioning_state=agent_pool.provisioning_state, + enable_node_public_ip=agent_pool.enable_node_public_ip, + scale_set_priority=agent_pool.scale_set_priority, + scale_set_eviction_policy=agent_pool.scale_set_eviction_policy, + spot_max_price=agent_pool.spot_max_price, + node_labels=agent_pool.node_labels, + node_taints=agent_pool.node_taints, + ) + + if agent_pool.availability_zones is not None: + for key in agent_pool.availability_zones: + agent_pool_dict['availability_zones'].append(int(key)) + + return agent_pool_dict + + +def main(): + AzureRMAgentPoolInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpoolversion_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpoolversion_info.py new file mode 100644 index 000000000..33cf24317 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpoolversion_info.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_aksagentpoolversion_info + +version_added: "1.14.0" + +short_description: Gets a list of supported versions for the specified agent pool + +description: + - Gets a list of supported versions for the specified agent pool. 
+ +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + cluster_name: + description: + - The name of the managed cluster resource. + required: true + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: Get available versions an AKS can be upgrade to + azure_rm_aksagentpoolversion_info: + resource_group: myResourceGroup + cluster_name: myAKSName +''' + +RETURN = ''' +azure_orchestrator_version: + description: + - List of supported kubernetes versions. + returned: always + type: list + sample: ['1.22.6', '1.22.11'] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMAksAgentPoolVersion(AzureRMModuleBase): + + def __init__(self): + + self.module_args = dict( + resource_group=dict(type='str', required=True), + cluster_name=dict(type='str', required=True), + ) + + self.results = dict( + changed=False, + azure_orchestrator_version=[] + ) + + self.resource_group = None + self.cluster_name = None + + super(AzureRMAksAgentPoolVersion, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + self.results['azure_orchestrator_version'] = self.get_all_versions() + + return self.results + + def get_all_versions(self): + ''' + Get all avaliable orchestrator version + ''' + try: + result = list() + response = self.managedcluster_client.agent_pools.get_available_agent_pool_versions(self.resource_group, self.cluster_name) + orchestrators = response.agent_pool_versions + for item in orchestrators: + result.append(item.kubernetes_version) + return result + except Exception as exc: + self.fail('Error when getting Agentpool supported orchestrator version list for 
locatio', exc) + + +def main(): + """Main module execution code path""" + + AzureRMAksAgentPoolVersion() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksupgrade_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksupgrade_info.py new file mode 100644 index 000000000..ae17906a2 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksupgrade_info.py @@ -0,0 +1,229 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Andrii Bilorus, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_aksupgrade_info + +version_added: "1.4.0" + +short_description: Get the upgrade versions available for a AKS instance + +description: + - Get the upgrade versions available for a managed Azure Container Service (AKS) instance. + +options: + resource_group: + description: + - Name of a resource group where the managed Azure Container Services (AKS) exists. + required: true + type: str + name: + description: + - Name of the managed Azure Container Services (AKS) instance. + required: true + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Andrii Bilorus (@ewscat) +''' + +EXAMPLES = ''' + - name: Get available upgrade versions for AKS instance + azure_rm_aksupgrade_info: + name: myAKS + resource_group: myResourceGroup + register: myAKSupgrades +''' + +RETURN = ''' +azure_aks_upgrades: + description: Supported AKS instance versions for upgrade by agent pools and control plane. + returned: always + type: complex + contains: + agent_pool_profiles: + description: Available upgrade versions for agent pools + returned: always + type: complex + contains: + upgrades: + description: List of orchestrator types and versions available for upgrade. 
+ type: complex + contains: + is_preview: + description: Is the version available in preview + type: bool + kubernetes_version: + description: Kubernetes version + type: str + sample: "1.19.3" + os_type: + description: Operating system type + type: str + sample: "Linux" + name: + description: Pool name + type: str + sample: "my_pool" + kubernetes_version: + description: Current kubernetes version + type: str + sample: "1.18.1" + control_plane_profile: + description: Available upgrade versions for control plane + returned: always + type: complex + contains: + upgrades: + description: List of orchestrator types and versions available for upgrade. + type: complex + contains: + is_preview: + description: Is the version available in preview + type: bool + kubernetes_version: + description: Kubernetes version + type: str + sample: "1.19.3" + os_type: + description: Operating system type + type: str + sample: "Linux" + name: + description: Pool name + type: str + sample: "my_pool" + kubernetes_version: + description: Current kubernetes version + type: str + sample: "1.18.1" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # handled in azure_rm_common + pass + + +class AzureRMAKSUpgrade(AzureRMModuleBase): + ''' + Utility class to get Azure Kubernetes Service upgrades + ''' + + def __init__(self): + + self.module_args = dict( + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True) + ) + + self.results = dict( + changed=False, + azure_aks_upgrades=[] + ) + + self.name = None + self.resource_group = None + + super(AzureRMAKSUpgrade, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + for key in self.module_args: + setattr(self, key, kwargs[key]) + + 
self.results['azure_aks_upgrades'] = self.get_upgrades(self.name, self.resource_group) + + return self.results + + def get_upgrades(self, name, resource_group): + ''' + Get supported upgrade version for AKS + :param: name: str with name of AKS cluster instance + :param: resource_group: str with resource group containing AKS instance + :return: dict with available versions for pool profiles and control plane + ''' + cluster = None + upgrade_profiles = None + + self.log('Get properties for {0}'.format(self.name)) + try: + cluster = self.managedcluster_client.managed_clusters.get(resource_group_name=resource_group, resource_name=name) + except ResourceNotFoundError as err: + self.fail('Error when getting AKS cluster information for {0} : {1}'.format(self.name, err.message or str(err))) + + self.log('Get available upgrade versions for {0}'.format(self.name)) + try: + upgrade_profiles = self.managedcluster_client.managed_clusters.get_upgrade_profile(resource_group_name=resource_group, + resource_name=name) + except ResourceNotFoundError as err: + self.fail('Error when getting upgrade versions for {0} : {1}'.format(self.name, err.message or str(err))) + + return dict( + agent_pool_profiles=[self.parse_profile(profile) + if profile.upgrades else self.default_profile(cluster) + for profile in upgrade_profiles.agent_pool_profiles] + if upgrade_profiles.agent_pool_profiles else None, + control_plane_profile=self.parse_profile(upgrade_profiles.control_plane_profile) + if upgrade_profiles.control_plane_profile.upgrades + else self.default_profile(cluster) + ) + + def default_profile(self, cluster): + ''' + Used when upgrade profile returned by Azure in None + (i.e. 
when the cluster runs latest version) + :param: cluster: ManagedCluster with AKS instance information + :return: dict containing upgrade profile with current cluster version + ''' + return dict( + upgrades=None, + kubernetes_version=cluster.kubernetes_version, + name=None, + os_type=None + ) + + def parse_profile(self, profile): + ''' + Transform cluster profile object to dict + :param: profile: ManagedClusterUpgradeProfile with AKS upgrade profile info + :return: dict with upgrade profiles + ''' + return dict( + upgrades=[dict( + is_preview=upgrade.is_preview, + kubernetes_version=upgrade.kubernetes_version + ) for upgrade in profile.upgrades], + kubernetes_version=profile.kubernetes_version, + name=profile.name, + os_type=profile.os_type + ) + + +def main(): + ''' + Main module execution code path + ''' + AzureRMAKSUpgrade() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksversion_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksversion_info.py new file mode 100644 index 000000000..4306fcce8 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksversion_info.py @@ -0,0 +1,130 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_aksversion_info + +version_added: "0.1.2" + +short_description: Get available kubernetes versions supported by Azure Kubernetes Service + +description: + - Get available kubernetes versions supported by Azure Kubernetes Service. + +options: + location: + description: + - Get the versions available for creating a managed Kubernetes cluster. + required: true + version: + description: + - Get the upgrade versions available for a managed Kubernetes cluster version. 
+ +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' + - name: Get available versions for AKS in location eastus + azure_rm_aksversion_info: + location: eastus + - name: Get available versions an AKS can be upgrade to + azure_rm_aksversion_info: + location: eastis + version: 1.11.6 +''' + +RETURN = ''' +azure_aks_versions: + description: List of supported kubernetes versions. + returned: always + type: list +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureHttpError +except Exception: + # handled in azure_rm_common + pass + + +class AzureRMAKSVersion(AzureRMModuleBase): + + def __init__(self): + + self.module_args = dict( + location=dict(type='str', required=True), + version=dict(type='str') + ) + + self.results = dict( + changed=False, + azure_aks_versions=[] + ) + + self.location = None + self.version = None + + super(AzureRMAKSVersion, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_aksversion_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_aksversion_facts' module has been renamed to 'azure_rm_aksversion_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + self.results['azure_aks_versions'] = self.get_all_versions(self.location, self.version) + + return self.results + + def get_all_versions(self, location, version): + ''' + Get all kubernetes version supported by AKS + :return: ordered version list + ''' + try: + result = dict() + response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters') + orchestrators = response.orchestrators + for item in 
orchestrators: + result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else [] + if version: + return result.get(version) or [] + else: + keys = list(result.keys()) + keys.sort() + return keys + except Exception as exc: + self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc))) + + +def main(): + """Main module execution code path""" + + AzureRMAKSVersion() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagement.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagement.py new file mode 100644 index 000000000..221bd44b9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_apimanagement.py @@ -0,0 +1,670 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Sakar Mehra (@sakar97), Nikhil Patne (@nikhilpatne) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_apimanagement +version_added: "1.6.0" +short_description: Manage Azure api instances +description: + - Create azure api instance. + - Update the existing azure api instance. + - Delete azure api instance. + +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + service_name: + description: + - The name of the API Management service. + required: true + type: str + api_id: + description: + - API revision identifier. It must be unique in the current API Management service instance. + required: true + type: str + description: + description: + - Description of the API. + type: str + authentication_settings: + description: + - Collection of authentication settings included into this API. 
+ type: dict + suboptions: + o_auth2: + description: + - OAuth2 Authentication settings + type: dict + suboptions: + authorization_server_id: + description: + - OAuth authorization server identifier. + type: str + scope: + description: + - operations scope. + type: str + openid: + description: + - OpenID Connect Authentication Settings + type: dict + suboptions: + openid_provider_id: + description: + - OAuth authorization server identifier. + type: str + bearer_token_sending_methods: + description: + - How to send token to the server. + type: list + elements: str + choices: + - authorizationHeader + - query + subscription_key_parameter_names: + description: + - Protocols over which API is made available. + type: dict + suboptions: + header: + description: + - Subscription key header name. + type: str + query: + description: + - Subscription key query string parameter name. + type: str + type: + description: + - Type of API + type: str + choices: + - http + - soap + api_revision: + description: + - Describes the Revision of the Api. + - If no value is provided, default revision 1 is created + type: str + api_version: + description: + - Indicates the Version identifier of the API if the API is versioned + type: str + is_current: + description: + - Indicates if API revision is current api revision. + type: bool + api_revision_description: + description: + - Description of the Api Revision. + type: str + api_version_description: + description: + - Description of the Api Version. + type: str + api_version_set_id: + description: + - A resource identifier for the related ApiVersionSet. + type: str + subscription_required: + description: + - Specifies whether an API or Product subscription is required for accessing the API. + type: bool + source_api_id: + description: + - API identifier of the source API. + type: str + display_name: + description: + - API Name to be displayed. It must be 1 to 300 characters long. 
+ type: str + service_url: + description: + - Absolute URL of the backend service implementing this API + - Cannot be more than 2000 characters long. + type: str + path: + description: + - Relative URL uniquely identifying this API. + type: str + protocols: + description: + - Describes on which protocols the operations in this API can be invoked. + type: list + elements: str + choices: + - http + - https + api_version_set: + description: + - Version set details + type: dict + suboptions: + id: + description: + - Identifier for existing API Version Set + - Omit this value to create a new Version Set. + type: str + name: + description: + - The display Name of the API Version Set. + type: str + description: + description: + - Description of API Version Set. + type: str + versioning_scheme: + description: + - An value that determines where the API Version identifer will be located in a HTTP request. + type: str + choices: + - Segment + - Query + - Header + version_query_name: + description: + - Name of query parameter that indicates the API Version if versioningScheme is set to `query`. + type: str + version_header_name: + description: + - Name of HTTP header parameter that indicates the API Version if versioningScheme is set to `header`. + type: str + value: + description: + - Content value when Importing an API. + type: str + format: + description: + - Format of the Content in which the API is getting imported. + type: str + choices: + - wadl-xml + - wadl-link-json + - swagger-json + - swagger-link-json + - wsdl + - wsdl-link + - openapi + - openapi+json + - openapi-link + wsdl_selector: + description: + - Criteria to limit import of WSDL to a subset of the document. + type: dict + suboptions: + wsdl_service_name: + description: + - Name of service to import from WSDL. + type: str + wsdl_endpoint_name: + description: + - Name of endpoint(port) to import from WSDL. + type: str + api_type: + description: + - Type of Api to create. 
+ - C(http) creates a SOAP to REST API. + - C(soap) creates a SOAP pass-through API. + type: str + choices: + - soap + - http + state: + description: + - State of the Api. + - Use C(present) to create or update an Api and C(absent) to delete it. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Sakar Mehra (@sakar97) + - Nikhil Patne (@nikhilpatne) + +''' + +EXAMPLES = ''' + - name: Create a new API instance + azure_rm_apimanagement: + resource_group: 'myResourceGroup' + service_name: myService + api_id: testApi + description: testDescription + display_name: TestAPI + service_url: 'http://testapi.example.net/api' + path: myapiPath + protocols: + - https + - name: Update an existing API instance. + azure_rm_apimanagement: + resource_group: myResourceGroup + service_name: myService + api_id: testApi + display_name: newTestAPI + service_url: 'http://testapi.example.net/api' + path: myapiPath + protocols: + - https + - name: ApiManagementDeleteApi + azure_rm_apimanagement: + resource_group: myResourceGroup + service_name: myService + api_id: testApi + state: absent +''' + +RETURN = \ + ''' +id: + description: + - Resource ID. 
class Actions:
    # Simple enum of the operation exec_module decided to perform.
    NoAction, Create, Update, Delete = range(4)


class AzureApiManagement(AzureRMModuleBaseExt):
    """Create, update or delete an API inside an API Management service.

    The module talks to the ARM REST endpoint
    ``.../Microsoft.ApiManagement/service/{service}/apis/{apiId}`` directly
    through GenericRestClient.  The ``disposition`` entries in the arg spec
    drive AzureRMModuleBaseExt.inflate_parameters(), which maps each Ansible
    option onto its location in the REST request body (``/properties/...``,
    nested camelCase keys, or ``*`` for "use the option name as-is").
    """

    def __init__(self):
        # NOTE: options marked ``updatable=False`` identify the resource and
        # are excluded from change comparison by create_compare_modifiers().
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                updatable=False,
                disposition='resourceGroupName',
                required=True
            ),
            service_name=dict(
                type='str',
                updatable=False,
                disposition='serviceName',
                required=True
            ),
            api_id=dict(
                type='str',
                updatable=False,
                disposition='apiId',
                required=True
            ),
            description=dict(
                type='str',
                disposition='/properties/description'
            ),
            authentication_settings=dict(
                type='dict',
                disposition='/properties/authenticationSettings',
                options=dict(
                    o_auth2=dict(
                        type='dict',
                        disposition='oAuth2',
                        options=dict(
                            authorization_server_id=dict(
                                type='str',
                                disposition='authorizationServerId'
                            ),
                            scope=dict(
                                type='str',
                                disposition='scope'
                            )
                        )
                    ),
                    openid=dict(
                        type='dict',
                        options=dict(
                            openid_provider_id=dict(
                                type='str',
                                disposition='openidProviderId'
                            ),
                            bearer_token_sending_methods=dict(
                                type='list',
                                elements='str',
                                disposition='bearerTokenSendingMethods',
                                choices=['authorizationHeader', 'query']
                            )
                        )
                    )
                )
            ),
            subscription_key_parameter_names=dict(
                type='dict',
                no_log=True,
                disposition='/properties/subscriptionKeyParameterNames',
                options=dict(
                    header=dict(
                        type='str',
                        required=False
                    ),
                    query=dict(
                        type='str',
                        required=False
                    )
                )
            ),
            type=dict(
                type='str',
                disposition='/properties/type',
                choices=['http', 'soap']
            ),
            api_revision=dict(
                type='str',
                disposition='/properties/apiRevision'
            ),
            api_version=dict(
                type='str',
                disposition='/properties/apiVersion'
            ),
            is_current=dict(
                type='bool',
                disposition='/properties/isCurrent'
            ),
            api_revision_description=dict(
                type='str',
                disposition='/properties/apiRevisionDescription'
            ),
            api_version_description=dict(
                type='str',
                disposition='/properties/apiVersionDescription'
            ),
            api_version_set_id=dict(
                type='str',
                disposition='/properties/apiVersionSetId',
            ),
            subscription_required=dict(
                type='bool',
                disposition='/properties/subscriptionRequired'
            ),
            source_api_id=dict(
                type='str',
                disposition='/properties/sourceApiId',
            ),
            display_name=dict(
                type='str',
                disposition='/properties/displayName'
            ),
            service_url=dict(
                type='str',
                disposition='/properties/serviceUrl'
            ),
            path=dict(
                type='str',
                disposition='/properties/*',
            ),
            protocols=dict(
                type='list',
                elements='str',
                disposition='/properties/protocols',
                choices=['http',
                         'https']
            ),
            api_version_set=dict(
                type='dict',
                disposition='/properties/apiVersionSet',
                options=dict(
                    id=dict(
                        type='str'
                    ),
                    name=dict(
                        type='str'
                    ),
                    description=dict(
                        type='str'
                    ),
                    versioning_scheme=dict(
                        type='str',
                        disposition='versioningScheme',
                        choices=['Segment',
                                 'Query',
                                 'Header']
                    ),
                    version_query_name=dict(
                        type='str',
                        disposition='versionQueryName'
                    ),
                    version_header_name=dict(
                        type='str',
                        disposition='versionHeaderName'
                    )
                )
            ),
            value=dict(
                type='str',
                disposition='/properties/*'
            ),
            format=dict(
                type='str',
                disposition='/properties/*',
                choices=['wadl-xml',
                         'wadl-link-json',
                         'swagger-json',
                         'swagger-link-json',
                         'wsdl',
                         'wsdl-link',
                         'openapi',
                         'openapi+json',
                         'openapi-link']
            ),
            wsdl_selector=dict(
                type='dict',
                disposition='/properties/wsdlSelector',
                options=dict(
                    wsdl_service_name=dict(
                        type='str',
                        disposition='wsdlServiceName'
                    ),
                    wsdl_endpoint_name=dict(
                        type='str',
                        disposition='wsdlEndpointName'
                    )
                )
            ),
            api_type=dict(
                type='str',
                disposition='/properties/apiType',
                choices=['http', 'soap']
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Identity options copied onto the instance by exec_module().
        self.resource_group = None
        self.service_name = None
        self.api_id = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        # HTTP codes GenericRestClient.query() treats as success.
        self.status_code = [200, 201, 202]
        self.to_do = Actions.NoAction

        # Raw option values that become the PUT request body (inflated below).
        self.body = {}
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2020-06-01-preview'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureApiManagement, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                 supports_check_mode=True,
                                                 supports_tags=True)

    def get_url(self):
        """Build the ARM resource URL for this API within its service."""
        return '/subscriptions' + '/' + self.subscription_id \
            + '/resourceGroups' + '/' + self.resource_group \
            + '/providers' + '/Microsoft.ApiManagement' + '/service' \
            + '/' + self.service_name + '/apis' + '/' + self.api_id

    def exec_module(self, **kwargs):
        """Decide Create/Update/Delete/NoAction and perform it.

        Returns the standard Ansible result dict; sets ``changed`` and, when
        a resource exists afterwards, its ``id``.
        """
        # Options with a matching attribute are identity/control values;
        # everything else goes into the request body.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        # https://docs.microsoft.com/en-us/azure/templates/microsoft.apimanagement/service/apis
        self.inflate_parameters(self.module_arg_spec, self.body, 0)
        self.url = self.get_url()
        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        old_response = self.get_resource()

        if not old_response:
            self.log("Api instance does not exist in the given service.")
            if self.state == 'present':
                self.to_do = Actions.Create
            else:
                self.log("Old instance didn't exist")
        else:
            self.log("Api instance already exists in the given service.")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                # Compare the desired body with the live resource; only
                # differences in updatable fields trigger an Update.
                modifiers = {}
                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
                self.results['modifiers'] = modifiers
                self.results['compare'] = []
                if not self.default_compare(modifiers, self.body, old_response, '', self.results):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log('Create and Update the Api instance.')

            # Check mode: report the pending change without touching Azure.
            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_and_update_resource()
            self.results['changed'] = True

        elif self.to_do == Actions.Delete:
            self.log('Api instance deleted.')

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            self.delete_resource()
            self.results['changed'] = True
        else:
            self.log('No change in Api instance.')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    # This function will create and update resource on the api management service.
    def create_and_update_resource(self):
        """PUT the inflated body; return the parsed JSON response."""

        try:
            response = self.mgmt_client.query(
                self.url,
                'PUT',
                self.query_parameters,
                self.header_parameters,
                self.body,
                self.status_code,
                600,
                30,
            )
        except CloudError as exc:
            self.log('Error while creating/updating the Api instance.')
            self.fail('Error creating the Api instance: {0}'.format(str(exc)))
        try:
            response = json.loads(response.text)
        except Exception:
            # Non-JSON reply: keep the raw text so the caller still sees it.
            response = {'text': response.text}

        return response

    def delete_resource(self):
        """DELETE the API; fail the module on a CloudError."""
        isDeleted = False
        try:
            response = self.mgmt_client.query(
                self.url,
                'DELETE',
                self.query_parameters,
                self.header_parameters,
                None,
                self.status_code,
                600,
                30,
            )
            isDeleted = True
        except CloudError as e:
            self.log('Error attempting to delete the Api instance.')
            self.fail('Error deleting the Api instance: {0}'.format(str(e)))

        return isDeleted

    def get_resource(self):
        """GET the API; return its parsed JSON, or False when not found."""
        isFound = False
        try:
            response = self.mgmt_client.query(
                self.url,
                'GET',
                self.query_parameters,
                self.header_parameters,
                None,
                self.status_code,
                600,
                30,
            )
            isFound = True
            response = json.loads(response.text)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # A 404 surfaces as CloudError; absence is a normal outcome here.
            self.log('Could not find the Api instance from the given parameters.')
        if isFound is True:
            return response
        return False
+from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_apimanagement_info +version_added: "1.6.0" +short_description: Get the infomation of the API Instance +description: + - Get the information of api instance. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + service_name: + description: + - The name of the API Management service. + required: true + type: str + expand_api_version_set: + description: + - Include full ApiVersionSet resource in response + type: bool + include_not_tagged_apis: + description: + - Included not tagged APIs in the response. + type: bool + api_id: + description: + - API revision identifier. It must be unique in the current API Management service instance. + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Sakar Mehra (@sakar97) + - Nikhil Patne (@nikhilpatne) + +''' + +EXAMPLES = ''' + - name: Get the information of api + azure_rm_apimanagement_info: + resource_group: myResourceGroup + service_name: myService + - name: Get the information of api + azure_rm_apimanagement_info: + resource_group: myResourceGroup + service_name: myService + api_id: testApi +''' + +RETURN = ''' +api: + description: + - A list of dict results where the key is the name of the Api and the values are the facts for that Api. + returned: always + type: complex + contains: + api_name: + description: + - The api name provided by the user. + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + name: + description: + - Resource name. + returned: always + type: str + sample: null + type: + description: + - Resource type for API Management resource. + returned: always + type: str + sample: null + properties: + description: + - Api entity contract properties. 
class AzureApiManagementInfo(AzureRMModuleBaseExt):
    """Info module: retrieve API facts from an Azure API Management service.

    Dispatches to one of three ARM REST endpoints:

    * a single API (``.../apis/{apiId}``) when I(api_id) is given,
    * APIs filtered by tag (``.../apisByTags``) when I(tags) is given,
    * all APIs of the service (``.../apis``) otherwise.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            service_name=dict(
                type='str',
                required=True
            ),
            # NOTE(review): expand_api_version_set and include_not_tagged_apis
            # are accepted but never forwarded as query parameters by the
            # methods below — presumably meant for $expandApiVersionSet /
            # includeNotTaggedApis; confirm against the REST API before wiring.
            expand_api_version_set=dict(
                type='bool'
            ),
            include_not_tagged_apis=dict(
                type='bool'
            ),
            api_id=dict(
                type='str'
            )
        )

        self.resource_group = None
        self.service_name = None
        self.tags = None
        self.expand_api_version_set = None
        self.include_not_tagged_apis = None
        self.api_id = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200]
        # Fallback store for options without a matching attribute.  The
        # original never initialized it, so the write in exec_module would
        # have raised AttributeError had that branch ever been reached.
        self.body = {}

        self.query_parameters = {}
        self.query_parameters['api-version'] = '2020-06-01-preview'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureApiManagementInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=True)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to the right query and return the facts."""
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        # resource_group and service_name are required by the arg spec, so the
        # only real discriminators are api_id and tags.  (The original chain
        # tested the same resource_group/service_name condition twice, which
        # made the list-by-service branch unreachable dead code.)
        if self.api_id is not None:
            self.results['api'] = self.get_api_info()
        elif self.tags is not None:
            self.results['api'] = self.listbytags()
        else:
            self.results['api'] = self.listbyservice()
        return self.results

    def get_url(self):
        """ARM URL for one specific API of the service."""
        return '/subscriptions' + '/' + self.subscription_id \
            + '/resourceGroups' + '/' + self.resource_group \
            + '/providers' + '/Microsoft.ApiManagement' + '/service' \
            + '/' + self.service_name + '/apis' + '/' + self.api_id

    def get_url_bytags(self):
        """ARM URL listing the service's APIs grouped by tag."""
        return '/subscriptions' + '/' + self.subscription_id \
            + '/resourceGroups' + '/' + self.resource_group \
            + '/providers' + '/Microsoft.ApiManagement' + '/service' \
            + '/' + self.service_name + '/apisByTags'

    def get_url_byservice(self):
        """ARM URL listing all APIs of the service."""
        return '/subscriptions' + '/' + self.subscription_id \
            + '/resourceGroups' + '/' + self.resource_group \
            + '/providers' + '/Microsoft.ApiManagement' + '/service' \
            + '/' + self.service_name + '/apis'

    def get_api_info(self):
        """GET one API; return its parsed JSON, or None when unavailable."""
        self.url = self.get_url()
        response = None

        try:
            response = self.mgmt_client.query(
                self.url,
                'GET',
                self.query_parameters,
                self.header_parameters,
                None,
                self.status_code,
                600,
                30,
            )
        except CloudError as e:
            self.log('Could not get the information.{0}'.format(e))
        try:
            response = json.loads(response.text)
        except Exception:
            # Covers both a failed query (response is None) and non-JSON text.
            return None

        return response

    def listbytags(self):
        """GET the apisByTags listing; return parsed JSON or None."""
        self.url = self.get_url_bytags()
        response = None
        try:
            response = self.mgmt_client.query(
                self.url,
                'GET',
                self.query_parameters,
                self.header_parameters,
                None,
                self.status_code,
                600,
                30,
            )
        except CloudError as e:
            self.log('Could not get info for the given api tags {0}'.format(e))
        try:
            response = json.loads(response.text)
        except Exception:
            return None

        return response

    def listbyservice(self):
        """GET all APIs of the service; return parsed JSON or None."""
        self.url = self.get_url_byservice()
        response = None
        try:
            response = self.mgmt_client.query(
                self.url,
                'GET',
                self.header_parameters and self.query_parameters or self.query_parameters,
                self.header_parameters,
                None,
                self.status_code,
                600,
                30,
            )
        except CloudError as e:
            self.log('Could not get info for a given services.{0}'.format(e))
        # Parse exactly once, after the query; the original parsed inside the
        # try as well, decoding the same payload twice.
        try:
            response = json.loads(response.text)
        except Exception:
            return None

        return response
class Actions:
    # Simple enum of the operation exec_module decided to perform.
    NoAction, Create, Update, Delete = range(4)


class AzureRMApiManagementService(AzureRMModuleBaseExt):
    """Create or delete an Azure API Management service instance.

    Talks to the ARM endpoint ``.../Microsoft.ApiManagement/service/{name}``
    through GenericRestClient.  ``disposition`` entries map Ansible options
    onto their location in the REST request body via inflate_parameters().
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                updatable=False,
                disposition='resourceGroupName',
                required=True
            ),
            name=dict(
                type='str',
                updatable=False,
                disposition='serviceName',
                required=True
            ),
            location=dict(
                type='str',
                updatable=False,
                disposition='location'
            ),
            publisher_name=dict(
                type='str',
                disposition='/properties/publisherName'
            ),
            publisher_email=dict(
                type='str',
                disposition='/properties/publisherEmail'
            ),
            sku_name=dict(
                type='str',
                disposition='/sku/name',
                choices=['Developer',
                         'Standard',
                         'Premium',
                         'Basic',
                         'Consumption']
            ),
            sku_capacity=dict(
                type='int',
                disposition='/sku/capacity'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Identity options copied onto the instance by exec_module().
        self.resource_group = None
        self.name = None
        self.location = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        # HTTP codes GenericRestClient.query() treats as success.
        self.status_code = [200, 201, 202]
        self.to_do = Actions.NoAction

        # Raw option values that become the PUT request body (inflated below).
        self.body = {}
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2020-06-01-preview'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureRMApiManagementService, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                          supports_check_mode=True,
                                                          supports_tags=True)

    def exec_module(self, **kwargs):
        """Decide Create/Update/Delete/NoAction and perform it."""
        # Options with a matching attribute are identity/control values;
        # everything else goes into the request body.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        resource_group = self.get_resource_group(self.resource_group)

        # Default the location to the resource group's location.
        if self.location is None:
            self.location = resource_group.location

        self.url = ('/subscriptions' +
                    '/{{ subscription_id }}' +
                    '/resourceGroups' +
                    '/{{ resource_group }}' +
                    '/providers' +
                    '/Microsoft.ApiManagement' +
                    '/service' +
                    '/{{ service_name }}')
        self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
        self.url = self.url.replace('{{ resource_group }}', self.resource_group)
        self.url = self.url.replace('{{ service_name }}', self.name)

        old_response = self.get_resource()

        if not old_response:
            self.log("ApiManagementService instance doesn't exist")

            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log('ApiManagementService instance already exists')

            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                # Compare desired body with the live resource; differences in
                # updatable fields trigger an Update.
                modifiers = {}
                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
                self.results['modifiers'] = modifiers
                self.results['compare'] = []
                if not self.default_compare(modifiers, self.body, old_response, '', self.results):
                    self.to_do = Actions.Update

        self.body['location'] = self.location
        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log('Need to Create / Update the ApiManagementService instance')

            # Check mode: report the pending change without touching Azure.
            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_resource()

            # if not old_response:
            self.results['changed'] = True
            # else:
            #     self.results['changed'] = old_response.__ne__(response)
            self.log('Creation / Update done')
        elif self.to_do == Actions.Delete:
            self.log('To delete ApiManagementService instance')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_resource()

            # make sure instance is actually deleted, for some Azure resources, instance is hanging around.
            while self.get_resource():
                time.sleep(20)
        else:
            self.log('ApiManagementService instance unchanged')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_resource(self):
        # Creating / Updating the ApiManagementService instance.
        try:
            response = self.mgmt_client.query(self.url,
                                              'PUT',
                                              self.query_parameters,
                                              self.header_parameters,
                                              self.body,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as exc:
            self.log('Error attempting to create the ApiManagementService instance.')
            self.fail('Error creating the ApiManagementService instance: {0}'.format(str(exc)))

        try:
            response = json.loads(response.text)
        except Exception:
            # Non-JSON reply: keep the raw text so the caller still sees it.
            response = {'text': response.text}
            pass

        return response

    def delete_resource(self):
        # Deleting the ApiManagementService instance.
        try:
            response = self.mgmt_client.query(self.url,
                                              'DELETE',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as e:
            self.log('Error attempting to delete the ApiManagementService instance.')
            self.fail('Error deleting the ApiManagementService instance: {0}'.format(str(e)))

        return True

    def get_resource(self):
        # Checking if the ApiManagementService instance is present.
        # Returns the parsed JSON resource, or False when it does not exist.
        found = False
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            found = True
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # A 404 surfaces as CloudError; absence is a normal outcome here.
            self.log('Did not find the ApiManagementService instance.')
        if found is True:
            return json.loads(response.text)

        return False
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_apimanagementservice_info +version_added: '1.5.0' +short_description: Get ApiManagementService info +description: + - Get info of ApiManagementService. +options: + resource_group: + description: + - The name of the resource group. + type: str + name: + description: + - Resource name. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Nikhil Patne (@nikhilpatne) + - Sakar Mehra (@Sakar97) + +''' + +EXAMPLES = ''' +- name: Get Api Management Service By Name and Resource Group + azure_rm_apimanagementservice_info: + resource_group: myResourceGroup + name: myApiName + +- name: Get Api Management Service By Resource Group + azure_rm_apimanagementservice_info: + resource_group: myResourceGroup + +- name: Get Api Management Service By Subscription + azure_rm_apimanagementservice_info: +''' + +RETURN = ''' +api_management_service: + description: + - A list of dict results where the key is the name of the ApiManagementService. + - The values are the facts for that ApiManagementService. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.ApiManagement/service/myPolicy + name: + description: + - Resource name. + returned: always + type: str + sample: myPolicy + type: + description: + - Resource type for API Management resource is set to Microsoft.ApiManagement. + returned: always + type: str + sample: Microsoft.ApiManagement/service + tags: + description: + - Resource tags. + returned: always + type: dict + sample: {'key1':'value1'} + properties: + description: + - Properties of the API Management service. 
+ returned: always + type: dict + sample: null + location: + description: + - Resource location. + type: str + returned: always + sample: 'East US' + sku: + description: + - SKU properties of the API Management service. + returned: always + type: str + sample: Developer + etag: + description: + - ETag of the resource. + returned: always + type: str + sample: AAAAAAAsQK8= + zones: + description: + - Zone of the resource. + type: str + returned: always + sample: null + identity: + description: + - Identity of the resource. + type: str + returned: always + sample: null +''' + +import time +import json +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +from copy import deepcopy +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMApiManagementServiceInfo(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ) + ) + + self.resource_group = None + self.name = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + + self.query_parameters = {} + self.query_parameters['api-version'] = '2020-06-01-preview' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + self.mgmt_client = None + super(AzureRMApiManagementServiceInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if (self.resource_group is not None and 
self.name is not None): + self.results['api_management_service'] = self.get() + elif (self.resource_group is not None): + self.results['api_management_service'] = self.listbyresourcegroup() + else: + self.results['api_management_service'] = self.list() + return self.results + + def get(self): + response = None + results = {} + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.ApiManagement' + + '/service' + + '/{{ service_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ service_name }}', self.name) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + except CloudError as e: + self.log('Could not get info for ApiManagementService: {0}'.format(str(e))) + + return self.format_item(results) + + def listbyresourcegroup(self): + response = None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.ApiManagement' + + '/service') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + except CloudError as e: + self.log('Could not get info for ApiManagementService: {0}'.format(str(e))) + + return [self.format_item(x) for x in results['value']] if results.get('value') else [] + + def list(self): + response = None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + +
'/providers' + + '/Microsoft.ApiManagement' + + '/service') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + except CloudError as e: + self.log('Could not get info for ApiManagementService: {0}'.format(str(e))) + + return [self.format_item(x) for x in results['value']] if results.get('value') else [] + + def format_item(self, item): + if item: + d = { + 'id': item['id'], + 'name': item['name'], + 'type': item['type'], + 'sku': item['sku']['name'], + 'identity': item['identity'], + 'zones': item['zones'], + 'location': item['location'], + 'etag': item['etag'], + 'properties': item['properties'] + } + else: + return dict() + return d + + +def main(): + AzureRMApiManagementServiceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway.py new file mode 100644 index 000000000..01f3b2eb5 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway.py @@ -0,0 +1,2431 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_appgateway +version_added: "0.1.2" +short_description: Manage Application Gateway instance +description: + - Create, update and delete instance of Application Gateway. + +options: + resource_group: + description: + - The name of the resource group. + required: True + name: + description: + - The name of the application gateway. + required: True + location: + description: + - Resource location.
If not set, location from the resource group will be used as default. + sku: + description: + - SKU of the application gateway resource. + type: dict + suboptions: + name: + description: + - Name of an application gateway SKU. + choices: + - 'standard_small' + - 'standard_medium' + - 'standard_large' + - 'standard_v2' + - 'waf_medium' + - 'waf_large' + - 'waf_v2' + tier: + description: + - Tier of an application gateway. + choices: + - 'standard' + - 'standard_v2' + - 'waf' + - 'waf_v2' + capacity: + description: + - Capacity (instance count) of an application gateway. + ssl_policy: + description: + - SSL policy of the application gateway resource. + type: dict + suboptions: + disabled_ssl_protocols: + description: + - List of SSL protocols to be disabled on application gateway. + type: list + elements: str + choices: + - 'tls_v1_0' + - 'tls_v1_1' + - 'tls_v1_2' + policy_type: + description: + - Type of SSL Policy. + choices: + - 'predefined' + - 'custom' + policy_name: + description: + - Name of Ssl C(predefined) policy. + choices: + - 'ssl_policy20150501' + - 'ssl_policy20170401' + - 'ssl_policy20170401_s' + cipher_suites: + description: + - List of SSL cipher suites to be enabled in the specified order to application gateway. 
+ type: list + elements: str + choices: + - tls_ecdhe_rsa_with_aes_256_gcm_sha384 + - tls_ecdhe_rsa_with_aes_128_gcm_sha256 + - tls_ecdhe_rsa_with_aes_256_cbc_sha384 + - tls_ecdhe_rsa_with_aes_128_cbc_sha256 + - tls_ecdhe_rsa_with_aes_256_cbc_sha + - tls_ecdhe_rsa_with_aes_128_cbc_sha + - tls_dhe_rsa_with_aes_256_gcm_sha384 + - tls_dhe_rsa_with_aes_128_gcm_sha256 + - tls_dhe_rsa_with_aes_256_cbc_sha + - tls_dhe_rsa_with_aes_128_cbc_sha + - tls_rsa_with_aes_256_gcm_sha384 + - tls_rsa_with_aes_128_gcm_sha256 + - tls_rsa_with_aes_256_cbc_sha256 + - tls_rsa_with_aes_128_cbc_sha256 + - tls_rsa_with_aes_256_cbc_sha + - tls_rsa_with_aes_128_cbc_sha + - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 + - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256 + - tls_ecdhe_ecdsa_with_aes_256_cbc_sha384 + - tls_ecdhe_ecdsa_with_aes_128_cbc_sha256 + - tls_ecdhe_ecdsa_with_aes_256_cbc_sha + - tls_ecdhe_ecdsa_with_aes_128_cbc_sha + - tls_dhe_dss_with_aes_256_cbc_sha256 + - tls_dhe_dss_with_aes_128_cbc_sha256 + - tls_dhe_dss_with_aes_256_cbc_sha + - tls_dhe_dss_with_aes_128_cbc_sha + - tls_rsa_with_3des_ede_cbc_sha + - tls_dhe_dss_with_3des_ede_cbc_sha + min_protocol_version: + description: + - Minimum version of SSL protocol to be supported on application gateway. + choices: + - 'tls_v1_0' + - 'tls_v1_1' + - 'tls_v1_2' + gateway_ip_configurations: + description: + - List of subnets used by the application gateway. + type: list + elements: dict + suboptions: + subnet: + description: + - Reference of the subnet resource. A subnet from where application gateway gets its private address. + type: dict + suboptions: + id: + description: + - Full ID of the subnet resource. Required if I(name) and I(virtual_network_name) are not provided. + name: + description: + - Name of the subnet. Only used if I(virtual_network_name) is also provided. + virtual_network_name: + description: + - Name of the virtual network. Only used if I(name) is also provided. 
+ name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + authentication_certificates: + description: + - Authentication certificates of the application gateway resource. + type: list + elements: dict + suboptions: + data: + description: + - Certificate public data - base64 encoded pfx. + name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + redirect_configurations: + description: + - Redirect configurations of the application gateway resource. + type: list + elements: dict + suboptions: + redirect_type: + description: + - Redirection type. + choices: + - 'permanent' + - 'found' + - 'see_other' + - 'temporary' + target_listener: + description: + - Reference to a listener to redirect the request to. + request_routing_rules: + description: + - List of c(basic) request routing rule names within the application gateway to which the redirect is bound. + version_added: "1.10.0" + url_path_maps: + description: + - List of URL path map names (c(path_based_routing) rules) within the application gateway to which the redirect is bound. + version_added: "1.10.0" + path_rules: + description: + - List of URL path rules within a c(path_based_routing) rule to which the redirect is bound. + type: list + elements: dict + suboptions: + name: + description: + - Name of the URL rule. + path_map_name: + description: + - Name of URL path map. + version_added: "1.10.0" + include_path: + description: + - Include path in the redirected url. + include_query_string: + description: + - Include query string in the redirected url. + name: + description: + - Name of the resource that is unique within a resource group. + rewrite_rule_sets: + description: + - List of rewrite configurations for the application gateway resource. 
+ type: list + elements: dict + version_added: "1.11.0" + suboptions: + name: + description: + - Name of the rewrite rule set. + required: True + rewrite_rules: + description: + - List of rewrite rules. + required: True + type: list + elements: dict + suboptions: + name: + description: + - Name of the rewrite rule. + required: True + rule_sequence: + description: + - Sequence of the rule that determines the order of execution within the set. + required: True + conditions: + description: + - Conditions based on which the action set execution will be evaluated. + type: list + elements: dict + suboptions: + variable: + description: + - The parameter for the condition. + required: True + pattern: + description: + - The pattern, either fixed string or regular expression, that evaluates the truthfulness of the condition. + required: True + ignore_case: + description: + - Setting this value to true will force the pattern to do a case in-sensitive comparison. + type: bool + default: True + negate: + description: + - Setting this value to true will force to check the negation of the condition given by the user. + type: bool + default: False + action_set: + description: + - Set of actions to be done as part of the rewrite rule. + required: True + type: dict + suboptions: + request_header_configurations: + description: + - List of actions to be taken on request headers. + type: list + elements: dict + suboptions: + header_name: + description: + - Name of the header. + required: True + header_value: + description: + - Value of the header. + - Leave the parameter unset to remove the header. + response_header_configurations: + description: + - List of actions to be taken on response headers. + type: list + elements: dict + suboptions: + header_name: + description: + - Name of the header. + required: True + header_value: + description: + - Value of the header. + - Leave the parameter unset to remove the header. 
+ url_configuration: + description: + - Action to be taken on the URL. + type: dict + suboptions: + modified_path: + description: + - Value to which the URL path will be rewritten. + - Leave parameter unset to keep the original URL path. + modified_query_string: + description: + - Value to which the URL query string will be rewritten. + - Leave parameter unset to keep the original URL query string. + reroute: + description: + - If set to true, will re-evaluate the path map provided in path-based request routing rules using modified path. + type: bool + default: False + ssl_certificates: + description: + - SSL certificates of the application gateway resource. + type: list + elements: dict + suboptions: + data: + description: + - Base-64 encoded pfx certificate. + - Only applicable in PUT Request. + password: + description: + - Password for the pfx file specified in I(data). + - Only applicable in PUT request. + name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + trusted_root_certificates: + version_added: "1.15.0" + description: + - Trusted Root certificates of the application gateway resource. + type: list + elements: dict + suboptions: + name: + description: + - Name of the trusted root certificate that is unique within an Application Gateway. + type: str + data: + description: + - Certificate public data. + type: str + key_vault_secret_id: + description: + - Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or 'Certificate' object stored in KeyVault. + type: str + frontend_ip_configurations: + description: + - Frontend IP addresses of the application gateway resource. + type: list + elements: dict + suboptions: + private_ip_address: + description: + - PrivateIPAddress of the network interface IP Configuration. + private_ip_allocation_method: + description: + - PrivateIP allocation method.
+ choices: + - 'static' + - 'dynamic' + subnet: + description: + - Reference of the subnet resource. + type: dict + suboptions: + id: + description: + - Full ID of the subnet resource. Required if I(name) and I(virtual_network_name) are not provided. + name: + description: + - Name of the subnet. Only used if I(virtual_network_name) is also provided. + virtual_network_name: + description: + - Name of the virtual network. Only used if I(name) is also provided. + public_ip_address: + description: + - Reference of the PublicIP resource. + name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + frontend_ports: + description: + - List of frontend ports of the application gateway resource. + type: list + elements: dict + suboptions: + port: + description: + - Frontend port. + name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + backend_address_pools: + description: + - List of backend address pool of the application gateway resource. + type: list + elements: dict + suboptions: + backend_addresses: + description: + - List of backend addresses. + type: list + elements: dict + suboptions: + fqdn: + description: + - Fully qualified domain name (FQDN). + ip_address: + description: + - IP address. + name: + description: + - Resource that is unique within a resource group. This name can be used to access the resource. + probes: + description: + - Probes available to the application gateway resource. + type: list + elements: dict + suboptions: + name: + description: + - Name of the I(probe) that is unique within an Application Gateway. + protocol: + description: + - The protocol used for the I(probe). + choices: + - 'http' + - 'https' + host: + description: + - Host name to send the I(probe) to. + path: + description: + - Relative path of I(probe). + - Valid path starts from '/'. + - Probe is sent to ://:. 
+ timeout: + description: + - The probe timeout in seconds. + - Probe marked as failed if valid response is not received with this timeout period. + - Acceptable values are from 1 second to 86400 seconds. + interval: + description: + - The probing interval in seconds. + - This is the time interval between two consecutive probes. + - Acceptable values are from 1 second to 86400 seconds. + unhealthy_threshold: + description: + - The I(probe) retry count. + - Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. + - Acceptable values are from 1 second to 20. + pick_host_name_from_backend_http_settings: + description: + - Whether host header should be picked from the host name of the backend HTTP settings. Default value is false. + type: bool + default: False + backend_http_settings_collection: + description: + - Backend http settings of the application gateway resource. + type: list + elements: dict + suboptions: + probe: + description: + - Probe resource of an application gateway. + port: + description: + - The destination port on the backend. + protocol: + description: + - The protocol used to communicate with the backend. + choices: + - 'http' + - 'https' + cookie_based_affinity: + description: + - Cookie based affinity. + choices: + - 'enabled' + - 'disabled' + connection_draining: + version_added: "1.15.0" + description: + - Connection draining of the backend http settings resource. + type: dict + suboptions: + drain_timeout_in_sec: + description: + - The number of seconds connection draining is active. Acceptable values are from 1 second to 3600 seconds. + type: int + enabled: + description: + - Whether connection draining is enabled or not. + type: bool + request_timeout: + description: + - Request timeout in seconds. + - Application Gateway will fail the request if response is not received within RequestTimeout. + - Acceptable values are from 1 second to 86400 seconds. 
+ authentication_certificates: + description: + - List of references to application gateway authentication certificates. + - Applicable only when C(cookie_based_affinity) is enabled, otherwise quietly ignored. + type: list + elements: dict + suboptions: + id: + description: + - Resource ID. + trusted_root_certificates: + version_added: "1.15.0" + description: + - Array of references to application gateway trusted root certificates. + - Can be the name of the trusted root certificate or full resource ID. + type: list + elements: str + host_name: + description: + - Host header to be sent to the backend servers. + pick_host_name_from_backend_address: + description: + - Whether host header should be picked from the host name of the backend server. Default value is false. + affinity_cookie_name: + description: + - Cookie name to use for the affinity cookie. + path: + description: + - Path which should be used as a prefix for all C(http) requests. + - Null means no path will be prefixed. Default value is null. + name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + http_listeners: + description: + - List of HTTP listeners of the application gateway resource. + type: list + elements: dict + suboptions: + frontend_ip_configuration: + description: + - Frontend IP configuration resource of an application gateway. + frontend_port: + description: + - Frontend port resource of an application gateway. + protocol: + description: + - Protocol of the C(http) listener. + choices: + - 'http' + - 'https' + host_name: + description: + - Host name of C(http) listener. + ssl_certificate: + description: + - SSL certificate resource of an application gateway. + require_server_name_indication: + description: + - Applicable only if I(protocol) is C(https). Enables SNI for multi-hosting. + name: + description: + - Name of the resource that is unique within a resource group. 
This name can be used to access the resource. + url_path_maps: + description: + - List of URL path maps of the application gateway resource. + type: list + elements: dict + suboptions: + name: + description: + - Name of the resource that is unique within the application gateway. This name can be used to access the resource. + default_backend_address_pool: + description: + - Backend address pool resource of the application gateway which will be used if no path matches occur. + - Mutually exclusive with I(default_redirect_configuration). + default_backend_http_settings: + description: + - Backend http settings resource of the application gateway; used with I(default_backend_address_pool). + default_rewrite_rule_set: + description: + - Default rewrite rule set for the path map. + - Can be the name of the rewrite rule set or full resource ID. + version_added: "1.11.0" + path_rules: + description: + - List of URL path rules. + type: list + elements: dict + suboptions: + name: + description: + - Name of the resource that is unique within the path map. + backend_address_pool: + description: + - Backend address pool resource of the application gateway which will be used if the path is matched. + - Mutually exclusive with I(redirect_configuration). + backend_http_settings: + description: + - Backend http settings resource of the application gateway; used for the path's I(backend_address_pool). + rewrite_rule_set: + description: + - Rewrite rule set for the path map. + - Can be the name of the rewrite rule set or full resource ID. + version_added: "1.11.0" + redirect_configuration: + description: + - Name of redirect configuration resource of the application gateway which will be used if the path is matched. + - Mutually exclusive with I(backend_address_pool). + version_added: "1.10.0" + paths: + description: + - List of paths. 
+ type: list + elements: str + default_redirect_configuration: + description: + - Name of redirect configuration resource of the application gateway which will be used if no path matches occur. + - Mutually exclusive with I(default_backend_address_pool). + version_added: "1.10.0" + request_routing_rules: + description: + - List of request routing rules of the application gateway resource. + type: list + elements: dict + suboptions: + rule_type: + description: + - Rule type. + choices: + - 'basic' + - 'path_based_routing' + backend_address_pool: + description: + - Backend address pool resource of the application gateway. Not used if I(rule_type) is C(path_based_routing). + backend_http_settings: + description: + - Backend C(http) settings resource of the application gateway. + http_listener: + description: + - Http listener resource of the application gateway. + name: + description: + - Name of the resource that is unique within a resource group. This name can be used to access the resource. + redirect_configuration: + description: + - Redirect configuration resource of the application gateway. + url_path_map: + description: + - URL path map resource of the application gateway. Required if I(rule_type) is C(path_based_routing). + rewrite_rule_set: + description: + - Rewrite rule set for the path map. + - Can be the name of the rewrite rule set or full resource ID. + version_added: "1.11.0" + autoscale_configuration: + version_added: "1.15.0" + description: + - Autoscale configuration of the application gateway resource. + type: dict + suboptions: + max_capacity: + description: + - Upper bound on number of Application Gateway capacity. + type: int + min_capacity: + description: + - Lower bound on number of Application Gateway capacity. + type: int + enable_http2: + version_added: "1.15.0" + description: + - Whether HTTP2 is enabled on the application gateway resource. 
+ type: bool + default: False + web_application_firewall_configuration: + version_added: "1.15.0" + description: + - Web application firewall configuration of the application gateway resource. + type: dict + suboptions: + disabled_rule_groups: + description: + - The disabled rule groups. + type: list + elements: dict + suboptions: + rule_group_name: + description: + - The name of the rule group that will be disabled. + type: str + rules: + description: + - The list of rules that will be disabled. If null, all rules of the rule group will be disabled. + type: list + elements: int + enabled: + description: + - Whether the web application firewall is enabled or not. + type: bool + exclusions: + description: + - The exclusion list. + type: list + elements: dict + suboptions: + match_variable: + description: + - The variable to be excluded. + type: str + selector: + description: + - When match_variable is a collection, operator used to specify which elements in the collection this exclusion applies to. + type: str + selector_match_operator: + description: + - When match_variable is a collection, operate on the selector to specify + which elements in the collection this exclusion applies to. + type: str + file_upload_limit_in_mb: + description: + - Maximum file upload size in Mb for WAF. + type: int + firewall_mode: + description: + - Web application firewall mode. + type: str + choices: + - 'Detection' + - 'Prevention' + max_request_body_size: + description: + - Maximum request body size for WAF. + type: int + max_request_body_size_in_kb: + description: + - Maximum request body size in Kb for WAF. + type: int + request_body_check: + description: + - Whether to allow WAF to check request Body. + type: bool + rule_set_type: + description: + - The type of the web application firewall rule set. + - Possible values are 'OWASP'. + type: str + choices: + - 'OWASP' + rule_set_version: + description: + - The version of the rule set type.
+ type: str + gateway_state: + description: + - Start or Stop the application gateway. When specified, no updates will occur to the gateway. + type: str + choices: + - started + - stopped + state: + description: + - Assert the state of the application gateway. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create instance of Application Gateway + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + sku: + name: standard_small + tier: standard + capacity: 2 + gateway_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + connection_draining: + drain_timeout_in_sec: 60 + enabled: true + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + name: sample_http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + +- name: Create instance of Application Gateway with custom trusted root certificate + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + sku: + name: standard_small + tier: standard + capacity: 2 + gateway_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: app_gateway_ip_config + frontend_ip_configurations: 
+ - subnet: + id: "{{ subnet_id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + trusted_root_certificates: + - name: "root_cert" + key_vault_secret_id: "https://kv/secret" + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + connection_draining: + drain_timeout_in_sec: 60 + enabled: true + name: sample_appgateway_http_settings + trusted_root_certificates: + - "root_cert" + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + name: sample_http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + +- name: Create instance of Application Gateway by looking up virtual network and subnet + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + sku: + name: standard_small + tier: standard + capacity: 2 + gateway_ip_configurations: + - subnet: + name: default + virtual_network_name: my-vnet + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + name: default + virtual_network_name: my-vnet + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + name: sample_http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: 
sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + +- name: Create instance of Application Gateway with path based rules + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + sku: + name: standard_small + tier: standard + capacity: 2 + gateway_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + name: sample_http_listener + request_routing_rules: + - rule_type: path_based_routing + http_listener: sample_http_listener + name: rule1 + url_path_map: path_mappings + url_path_maps: + - name: path_mappings + default_backend_address_pool: test_backend_address_pool + default_backend_http_settings: sample_appgateway_http_settings + path_rules: + - name: path_rules + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + paths: + - "/abc" + - "/123/*" + +- name: Create instance of Application Gateway with complex routing and redirect rules + azure_rm_appgateway: + resource_group: myResourceGroup + name: myComplexAppGateway + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: ssl_cert + password: your-password + data: "{{ lookup('file', 'certfile') }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: 
+ id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "ssl_cert" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "ssl_cert" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + 
rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + +- name: Create v2 instance of Application Gateway with rewrite rules + azure_rm_appgateway: + resource_group: myResourceGroup + name: myV2AppGateway + sku: + name: standard_v2 + tier: standard_v2 + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20170401_s + ssl_certificates: + - name: ssl_cert + password: your-password + data: "{{ lookup('file', ssl_cert) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: my-appgw-pip + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: 
true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "ssl_cert" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "ssl_cert" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: 
"var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + +- name: Create instance of Application Gateway with autoscale configuration + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + sku: + name: standard_small + tier: standard + autoscale_configuration: + max_capacity: 2 + min_capacity: 1 + gateway_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + name: sample_http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: 
sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + +- name: Create instance of Application Gateway waf_v2 with waf configuration + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + sku: + name: waf_v2 + tier: waf_v2 + capacity: 2 + gateway_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + name: sample_http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + web_application_firewall_configuration: + - enabled: true + firewall_mode: Detection + rule_set_type: OWASP + rule_set_version: 3.0 + request_body_check: true + max_request_body_size_in_kb: 128 + file_upload_limit_in_mb: 100 + +- name: Stop an Application Gateway instance + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + gateway_state: stopped + +- name: Start an Application Gateway instance + azure_rm_appgateway: + resource_group: myResourceGroup + name: myAppGateway + gateway_state: started +''' + +RETURN = ''' +id: + description: + - Application gateway resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationGateways/myAppGw +name: + description: + - Name of application gateway. 
class Actions:
    """Symbolic constants naming the operation the module decides to perform."""

    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
    Start = 4
    Stop = 5


# Sub-spec for the gateway SKU: instance count plus size/tier names
# (lower-case here; exec_module maps them to the Azure API casing).
sku_spec = {
    'capacity': {'type': 'int'},
    'name': {
        'type': 'str',
        'choices': ['standard_small', 'standard_medium', 'standard_large',
                    'standard_v2', 'waf_medium', 'waf_large', 'waf_v2'],
    },
    'tier': {
        'type': 'str',
        'choices': ['standard', 'standard_v2', 'waf', 'waf_v2'],
    },
}


# Sub-spec for the gateway's TLS/SSL policy; protocol/policy names are
# lower-case aliases that exec_module translates to Azure constants.
ssl_policy_spec = {
    'disabled_ssl_protocols': {'type': 'list'},
    'policy_type': {'type': 'str', 'choices': ['predefined', 'custom']},
    'policy_name': {
        'type': 'str',
        'choices': ['ssl_policy20150501', 'ssl_policy20170401', 'ssl_policy20170401_s'],
    },
    'cipher_suites': {'type': 'list'},
    'min_protocol_version': {'type': 'str', 'choices': ['tls_v1_0', 'tls_v1_1', 'tls_v1_2']},
}
# ---- redirect configuration sub-specs -----------------------------------

# Reference to a path rule inside a URL path map (map name + rule name).
redirect_path_rules_spec = {
    'name': {'type': 'str'},
    'path_map_name': {'type': 'str'},
}


# One redirect configuration attached to the gateway.
redirect_configuration_spec = {
    'include_path': {'type': 'bool'},
    'include_query_string': {'type': 'bool'},
    'name': {'type': 'str'},
    'redirect_type': {'type': 'str',
                      'choices': ['permanent', 'found', 'see_other', 'temporary']},
    'target_listener': {'type': 'str'},
    'request_routing_rules': {'type': 'list', 'elements': 'str'},
    'url_path_maps': {'type': 'list', 'elements': 'str'},
    'path_rules': {'type': 'list', 'elements': 'dict', 'options': redirect_path_rules_spec},
}


# ---- rewrite rule set sub-specs -----------------------------------------

# A match condition evaluated before a rewrite rule's actions fire.
rewrite_condition_spec = {
    'variable': {'type': 'str', 'required': True},
    'pattern': {'type': 'str', 'required': True},
    'ignore_case': {'type': 'bool', 'default': True},
    'negate': {'type': 'bool', 'default': False},
}


# A header to set on the request/response; an empty header_value clears it.
rewrite_header_configuration_spec = {
    'header_name': {'type': 'str', 'required': True},
    'header_value': {'type': 'str', 'default': ''},
}


# Optional URL (path and/or query string) rewrite performed by a rule.
rewrite_url_configuration_spec = {
    'modified_path': {'type': 'str'},
    'modified_query_string': {'type': 'str'},
    'reroute': {'type': 'bool', 'default': False},
}


# The set of actions a rewrite rule applies when its conditions match.
rewrite_action_set_spec = {
    'request_header_configurations': {'type': 'list', 'elements': 'dict',
                                      'options': rewrite_header_configuration_spec,
                                      'default': []},
    'response_header_configurations': {'type': 'list', 'elements': 'dict',
                                       'options': rewrite_header_configuration_spec,
                                       'default': []},
    'url_configuration': {'type': 'dict', 'options': rewrite_url_configuration_spec},
}


# One rewrite rule; rule_sequence orders rules within their rule set.
rewrite_rule_spec = {
    'name': {'type': 'str', 'required': True},
    'rule_sequence': {'type': 'int', 'required': True},
    'conditions': {'type': 'list', 'elements': 'dict',
                   'options': rewrite_condition_spec, 'default': []},
    'action_set': {'type': 'dict', 'required': True, 'options': rewrite_action_set_spec},
}
# One path-based routing rule inside a URL path map.
path_rules_spec = {
    'name': {'type': 'str'},
    'backend_address_pool': {'type': 'str'},
    'backend_http_settings': {'type': 'str'},
    'redirect_configuration': {'type': 'str'},
    'paths': {'type': 'list', 'elements': 'str'},
    'rewrite_rule_set': {'type': 'str'},
}


# A URL path map: default targets plus a list of path rules.  Each rule must
# route to exactly one of a backend pool or a redirect configuration, and a
# backend pool requires matching backend HTTP settings.
url_path_maps_spec = {
    'name': {'type': 'str'},
    'default_backend_address_pool': {'type': 'str'},
    'default_backend_http_settings': {'type': 'str'},
    'path_rules': {
        'type': 'list',
        'elements': 'dict',
        'options': path_rules_spec,
        'mutually_exclusive': [('backend_address_pool', 'redirect_configuration')],
        'required_one_of': [('backend_address_pool', 'redirect_configuration')],
        'required_together': [('backend_address_pool', 'backend_http_settings')],
    },
    'default_redirect_configuration': {'type': 'str'},
    'default_rewrite_rule_set': {'type': 'str'},
}

# Autoscale bounds (v2 SKUs only).
autoscale_configuration_spec = {
    'max_capacity': {'type': 'int'},
    'min_capacity': {'type': 'int'},
}

# A WAF exclusion entry: which variable/selector to exclude from inspection.
waf_configuration_exclusions_spec = {
    'match_variable': {'type': 'str'},
    'selector': {'type': 'str'},
    'selector_match_operator': {'type': 'str'},
}

# A WAF rule group to disable, optionally restricted to specific rule ids.
waf_configuration_disabled_rule_groups_spec = {
    'rule_group_name': {'type': 'str'},
    'rules': {'type': 'list', 'elements': 'int', 'default': []},
}

# Top-level web application firewall configuration (WAF SKUs only).
web_application_firewall_configuration_spec = {
    'enabled': {'type': 'bool'},
    'firewall_mode': {'type': 'str', 'choices': ['Detection', 'Prevention']},
    'rule_set_type': {'type': 'str', 'choices': ['OWASP']},
    'rule_set_version': {'type': 'str'},
    'request_body_check': {'type': 'bool'},
    'max_request_body_size': {'type': 'int'},
    'max_request_body_size_in_kb': {'type': 'int'},
    'file_upload_limit_in_mb': {'type': 'int'},
    'exclusions': {'type': 'list', 'elements': 'dict',
                   'options': waf_configuration_exclusions_spec, 'default': []},
    'disabled_rule_groups': {'type': 'list', 'elements': 'dict',
                             'options': waf_configuration_disabled_rule_groups_spec,
                             'default': []},
}
+ data=dict(type='str'), + key_vault_secret_id=dict(type='str', default='') +) + + +class AzureRMApplicationGateways(AzureRMModuleBase): + """Configuration class for an Azure RM Application Gateway resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + sku=dict( + type='dict', + options=sku_spec, + ), + ssl_policy=dict( + type='dict', + options=ssl_policy_spec + ), + gateway_ip_configurations=dict( + type='list' + ), + authentication_certificates=dict( + type='list' + ), + ssl_certificates=dict( + type='list' + ), + trusted_root_certificates=dict( + type='list', + elements='dict', + options=trusted_root_certificates_spec + ), + redirect_configurations=dict( + type='list', + elements='dict', + options=redirect_configuration_spec + ), + rewrite_rule_sets=dict( + type='list', + elements='dict', + options=rewrite_rule_set_spec + ), + frontend_ip_configurations=dict( + type='list' + ), + frontend_ports=dict( + type='list' + ), + backend_address_pools=dict( + type='list' + ), + backend_http_settings_collection=dict( + type='list' + ), + probes=dict( + type='list', + elements='dict', + options=probe_spec + ), + http_listeners=dict( + type='list' + ), + url_path_maps=dict( + type='list', + elements='dict', + options=url_path_maps_spec, + mutually_exclusive=[('default_backend_address_pool', 'default_redirect_configuration')], + required_one_of=[('default_backend_address_pool', 'default_redirect_configuration')], + required_together=[('default_backend_address_pool', 'default_backend_http_settings')], + ), + request_routing_rules=dict( + type='list' + ), + autoscale_configuration=dict( + type='dict', + options=autoscale_configuration_spec, + ), + web_application_firewall_configuration=dict( + type='dict', + options=web_application_firewall_configuration_spec + ), + enable_http2=dict( + type='bool', + default=False + ), + 
gateway_state=dict( + type='str', + choices=['started', 'stopped'], + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + + self.results = dict(changed=False) + self.state = None + self.gateway_state = None + self.to_do = Actions.NoAction + + super(AzureRMApplicationGateways, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "id": + self.parameters["id"] = kwargs[key] + elif key == "location": + self.parameters["location"] = kwargs[key] + elif key == "sku": + ev = kwargs[key] + if 'name' in ev: + if ev['name'] == 'standard_small': + ev['name'] = 'Standard_Small' + elif ev['name'] == 'standard_medium': + ev['name'] = 'Standard_Medium' + elif ev['name'] == 'standard_large': + ev['name'] = 'Standard_Large' + elif ev['name'] == 'standard_v2': + ev['name'] = 'Standard_v2' + elif ev['name'] == 'waf_medium': + ev['name'] = 'WAF_Medium' + elif ev['name'] == 'waf_large': + ev['name'] = 'WAF_Large' + elif ev['name'] == 'waf_v2': + ev['name'] = 'WAF_v2' + if 'tier' in ev: + if ev['tier'] == 'standard': + ev['tier'] = 'Standard' + if ev['tier'] == 'standard_v2': + ev['tier'] = 'Standard_v2' + elif ev['tier'] == 'waf': + ev['tier'] = 'WAF' + elif ev['tier'] == 'waf_v2': + ev['tier'] = 'WAF_v2' + self.parameters["sku"] = ev + elif key == "ssl_policy": + ev = kwargs[key] + if 'policy_type' in ev: + ev['policy_type'] = _snake_to_camel(ev['policy_type'], True) + if 'policy_name' in ev: + if ev['policy_name'] == 'ssl_policy20150501': + ev['policy_name'] = 'AppGwSslPolicy20150501' + elif ev['policy_name'] == 'ssl_policy20170401': + ev['policy_name'] = 'AppGwSslPolicy20170401' + elif 
ev['policy_name'] == 'ssl_policy20170401_s': + ev['policy_name'] = 'AppGwSslPolicy20170401S' + if 'min_protocol_version' in ev: + if ev['min_protocol_version'] == 'tls_v1_0': + ev['min_protocol_version'] = 'TLSv1_0' + elif ev['min_protocol_version'] == 'tls_v1_1': + ev['min_protocol_version'] = 'TLSv1_1' + elif ev['min_protocol_version'] == 'tls_v1_2': + ev['min_protocol_version'] = 'TLSv1_2' + if 'disabled_ssl_protocols' in ev: + protocols = ev['disabled_ssl_protocols'] + if protocols is not None: + for i in range(len(protocols)): + if protocols[i] == 'tls_v1_0': + protocols[i] = 'TLSv1_0' + elif protocols[i] == 'tls_v1_1': + protocols[i] = 'TLSv1_1' + elif protocols[i] == 'tls_v1_2': + protocols[i] = 'TLSv1_2' + if 'cipher_suites' in ev: + suites = ev['cipher_suites'] + if suites is not None: + for i in range(len(suites)): + suites[i] = suites[i].upper() + for prop_name in ['policy_name', 'min_protocol_version', 'disabled_ssl_protocols', 'cipher_suites']: + if prop_name in ev and ev[prop_name] is None: + # delete unspecified properties for clean comparison + del ev[prop_name] + self.parameters["ssl_policy"] = ev + elif key == "gateway_ip_configurations": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if 'subnet' in item and 'name' in item['subnet'] and 'virtual_network_name' in item['subnet']: + id = subnet_id(self.subscription_id, + kwargs['resource_group'], + item['subnet']['virtual_network_name'], + item['subnet']['name']) + item['subnet'] = {'id': id} + self.parameters["gateway_ip_configurations"] = kwargs[key] + elif key == "authentication_certificates": + self.parameters["authentication_certificates"] = kwargs[key] + elif key == "ssl_certificates": + self.parameters["ssl_certificates"] = kwargs[key] + elif key == "trusted_root_certificates": + self.parameters["trusted_root_certificates"] = kwargs[key] + elif key == "redirect_configurations": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if 'redirect_type' in item: + 
item['redirect_type'] = _snake_to_camel(item['redirect_type'], True) + if 'target_listener' in item: + id = http_listener_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['target_listener']) + item['target_listener'] = {'id': id} + if item['request_routing_rules']: + for j in range(len(item['request_routing_rules'])): + rule_name = item['request_routing_rules'][j] + id = request_routing_rule_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + rule_name) + item['request_routing_rules'][j] = {'id': id} + else: + del item['request_routing_rules'] + if item['url_path_maps']: + for j in range(len(item['url_path_maps'])): + pathmap_name = item['url_path_maps'][j] + id = url_path_map_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + pathmap_name) + item['url_path_maps'][j] = {'id': id} + else: + del item['url_path_maps'] + if item['path_rules']: + for j in range(len(item['path_rules'])): + pathrule = item['path_rules'][j] + if 'name' in pathrule and 'path_map_name' in pathrule: + id = url_path_rule_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + pathrule['path_map_name'], + pathrule['name']) + item['path_rules'][j] = {'id': id} + else: + del item['path_rules'] + self.parameters["redirect_configurations"] = ev + elif key == "rewrite_rule_sets": + ev = kwargs[key] + for i in range(len(ev)): + ev2 = ev[i]['rewrite_rules'] + for j in range(len(ev2)): + item2 = ev2[j] + if item2['action_set'].get('url_configuration'): + if not item2['action_set']['url_configuration'].get('modified_path'): + del item2['action_set']['url_configuration']['modified_path'] + if not item2['action_set']['url_configuration'].get('modified_query_string'): + del item2['action_set']['url_configuration']['modified_query_string'] + else: + del item2['action_set']['url_configuration'] + self.parameters["rewrite_rule_sets"] = ev + elif key == "frontend_ip_configurations": + ev = kwargs[key] + for i in 
range(len(ev)): + item = ev[i] + if 'private_ip_allocation_method' in item: + item['private_ip_allocation_method'] = _snake_to_camel(item['private_ip_allocation_method'], True) + if 'public_ip_address' in item: + id = public_ip_id(self.subscription_id, + kwargs['resource_group'], + item['public_ip_address']) + item['public_ip_address'] = {'id': id} + if 'subnet' in item and 'name' in item['subnet'] and 'virtual_network_name' in item['subnet']: + id = subnet_id(self.subscription_id, + kwargs['resource_group'], + item['subnet']['virtual_network_name'], + item['subnet']['name']) + item['subnet'] = {'id': id} + self.parameters["frontend_ip_configurations"] = ev + elif key == "frontend_ports": + self.parameters["frontend_ports"] = kwargs[key] + elif key == "backend_address_pools": + self.parameters["backend_address_pools"] = kwargs[key] + elif key == "probes": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if 'protocol' in item: + item['protocol'] = _snake_to_camel(item['protocol'], True) + if 'pick_host_name_from_backend_http_settings' in item and item['pick_host_name_from_backend_http_settings'] and 'host' in item: + del item['host'] + self.parameters["probes"] = ev + elif key == "backend_http_settings_collection": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if 'protocol' in item: + item['protocol'] = _snake_to_camel(item['protocol'], True) + if 'cookie_based_affinity' in item: + item['cookie_based_affinity'] = _snake_to_camel(item['cookie_based_affinity'], True) + if 'probe' in item: + id = probe_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['probe']) + item['probe'] = {'id': id} + if 'trusted_root_certificates' in item: + for j in range(len(item['trusted_root_certificates'])): + id = item['trusted_root_certificates'][j] + id = id if is_valid_resource_id(id) else trusted_root_certificate_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + id) + item['trusted_root_certificates'][j] 
= {'id': id} + self.parameters["backend_http_settings_collection"] = ev + elif key == "http_listeners": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if 'frontend_ip_configuration' in item: + id = frontend_ip_configuration_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['frontend_ip_configuration']) + item['frontend_ip_configuration'] = {'id': id} + + if 'frontend_port' in item: + id = frontend_port_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['frontend_port']) + item['frontend_port'] = {'id': id} + if 'ssl_certificate' in item: + id = ssl_certificate_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['ssl_certificate']) + item['ssl_certificate'] = {'id': id} + if 'protocol' in item: + item['protocol'] = _snake_to_camel(item['protocol'], True) + ev[i] = item + self.parameters["http_listeners"] = ev + elif key == "url_path_maps": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if item['default_backend_address_pool']: + id = backend_address_pool_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['default_backend_address_pool']) + item['default_backend_address_pool'] = {'id': id} + else: + del item['default_backend_address_pool'] + if item['default_backend_http_settings']: + id = backend_http_settings_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['default_backend_http_settings']) + item['default_backend_http_settings'] = {'id': id} + else: + del item['default_backend_http_settings'] + if 'path_rules' in item: + ev2 = item['path_rules'] + for j in range(len(ev2)): + item2 = ev2[j] + if item2['backend_address_pool']: + id = backend_address_pool_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item2['backend_address_pool']) + item2['backend_address_pool'] = {'id': id} + else: + del item2['backend_address_pool'] + if item2['backend_http_settings']: + id = 
backend_http_settings_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item2['backend_http_settings']) + item2['backend_http_settings'] = {'id': id} + else: + del item2['backend_http_settings'] + if item2['redirect_configuration']: + id = redirect_configuration_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item2['redirect_configuration']) + item2['redirect_configuration'] = {'id': id} + else: + del item2['redirect_configuration'] + if item2['rewrite_rule_set']: + id = item2['rewrite_rule_set'] + id = id if is_valid_resource_id(id) else rewrite_rule_set_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + id) + item2['rewrite_rule_set'] = {'id': id} + else: + del item2['rewrite_rule_set'] + ev2[j] = item2 + if item['default_redirect_configuration']: + id = redirect_configuration_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['default_redirect_configuration']) + item['default_redirect_configuration'] = {'id': id} + else: + del item['default_redirect_configuration'] + if item['default_rewrite_rule_set']: + id = item['default_rewrite_rule_set'] + id = id if is_valid_resource_id(id) else rewrite_rule_set_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + id) + item['default_rewrite_rule_set'] = {'id': id} + else: + del item['default_rewrite_rule_set'] + ev[i] = item + self.parameters["url_path_maps"] = ev + elif key == "request_routing_rules": + ev = kwargs[key] + for i in range(len(ev)): + item = ev[i] + if 'rule_type' in item and item['rule_type'] == 'path_based_routing' and 'backend_address_pool' in item: + del item['backend_address_pool'] + if 'backend_address_pool' in item: + id = backend_address_pool_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['backend_address_pool']) + item['backend_address_pool'] = {'id': id} + if 'backend_http_settings' in item: + id = backend_http_settings_id(self.subscription_id, + 
kwargs['resource_group'], + kwargs['name'], + item['backend_http_settings']) + item['backend_http_settings'] = {'id': id} + if 'http_listener' in item: + id = http_listener_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['http_listener']) + item['http_listener'] = {'id': id} + if 'protocol' in item: + item['protocol'] = _snake_to_camel(item['protocol'], True) + if 'rule_type' in item: + item['rule_type'] = _snake_to_camel(item['rule_type'], True) + if 'redirect_configuration' in item: + id = redirect_configuration_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['redirect_configuration']) + item['redirect_configuration'] = {'id': id} + if 'url_path_map' in item: + id = url_path_map_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + item['url_path_map']) + item['url_path_map'] = {'id': id} + if item.get('rewrite_rule_set'): + id = item.get('rewrite_rule_set') + id = id if is_valid_resource_id(id) else rewrite_rule_set_id(self.subscription_id, + kwargs['resource_group'], + kwargs['name'], + id) + item['rewrite_rule_set'] = {'id': id} + ev[i] = item + self.parameters["request_routing_rules"] = ev + elif key == "etag": + self.parameters["etag"] = kwargs[key] + elif key == "autoscale_configuration": + self.parameters["autoscale_configuration"] = kwargs[key] + elif key == "web_application_firewall_configuration": + self.parameters["web_application_firewall_configuration"] = kwargs[key] + elif key == "enable_http2": + self.parameters["enable_http2"] = kwargs[key] + + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_applicationgateway() + + if not old_response: + self.log("Application Gateway instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + 
self.log("Application Gateway instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if Application Gateway instance has to be deleted or may be updated") + self.to_do = Actions.Update + + if (self.to_do == Actions.Update): + if (old_response['operational_state'] == 'Stopped' and self.gateway_state == 'started'): + self.to_do = Actions.Start + elif (old_response['operational_state'] == 'Running' and self.gateway_state == 'stopped'): + self.to_do = Actions.Stop + elif ((old_response['operational_state'] == 'Stopped' and self.gateway_state == 'stopped') or + (old_response['operational_state'] == 'Running' and self.gateway_state == 'started')): + self.to_do = Actions.NoAction + elif (self.parameters['location'] != old_response['location'] or + self.parameters['enable_http2'] != old_response['enable_http2'] or + self.parameters['sku']['name'] != old_response['sku']['name'] or + self.parameters['sku']['tier'] != old_response['sku']['tier'] or + self.parameters['sku'].get('capacity', None) != old_response['sku'].get('capacity', None) or + not compare_arrays(old_response, self.parameters, 'authentication_certificates') or + not compare_dicts(old_response, self.parameters, 'ssl_policy') or + not compare_arrays(old_response, self.parameters, 'gateway_ip_configurations') or + not compare_arrays(old_response, self.parameters, 'redirect_configurations') or + not compare_arrays(old_response, self.parameters, 'rewrite_rule_sets') or + not compare_arrays(old_response, self.parameters, 'frontend_ip_configurations') or + not compare_arrays(old_response, self.parameters, 'frontend_ports') or + not compare_arrays(old_response, self.parameters, 'backend_address_pools') or + not compare_arrays(old_response, self.parameters, 'probes') or + not compare_arrays(old_response, self.parameters, 'backend_http_settings_collection') or + not compare_arrays(old_response, self.parameters, 
'request_routing_rules') or + not compare_arrays(old_response, self.parameters, 'http_listeners') or + not compare_arrays(old_response, self.parameters, 'url_path_maps') or + not compare_arrays(old_response, self.parameters, 'trusted_root_certificates') or + not compare_dicts(old_response, self.parameters, 'autoscale_configuration') or + not compare_dicts(old_response, self.parameters, 'web_application_firewall_configuration')): + self.to_do = Actions.Update + else: + self.to_do = Actions.NoAction + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Application Gateway instance") + + if self.check_mode: + self.results['changed'] = True + self.results["parameters"] = self.parameters + return self.results + + response = self.create_update_applicationgateway() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif (self.to_do == Actions.Start) or (self.to_do == Actions.Stop): + self.log("Need to Start / Stop the Application Gateway instance") + self.results['changed'] = True + response = old_response + + if self.check_mode: + return self.results + elif self.to_do == Actions.Start: + self.start_applicationgateway() + response["operational_state"] = "Running" + else: + self.stop_applicationgateway() + response["operational_state"] = "Stopped" + + elif self.to_do == Actions.Delete: + self.log("Application Gateway instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_applicationgateway() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_applicationgateway(): + time.sleep(20) + else: + self.log("Application Gateway instance unchanged") + self.results['changed'] = False + response = old_response + + if 
response: + self.results.update(self.format_response(response)) + + return self.results + + def create_update_applicationgateway(self): + ''' + Creates or updates Application Gateway with the specified configuration. + + :return: deserialized Application Gateway instance state dictionary + ''' + self.log("Creating / Updating the Application Gateway instance {0}".format(self.name)) + + try: + response = self.network_client.application_gateways.begin_create_or_update(resource_group_name=self.resource_group, + application_gateway_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Application Gateway instance.') + self.fail("Error creating the Application Gateway instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_applicationgateway(self): + ''' + Deletes specified Application Gateway instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Application Gateway instance {0}".format(self.name)) + try: + response = self.network_client.application_gateways.begin_delete(resource_group_name=self.resource_group, + application_gateway_name=self.name) + except Exception as e: + self.log('Error attempting to delete the Application Gateway instance.') + self.fail("Error deleting the Application Gateway instance: {0}".format(str(e))) + + return True + + def get_applicationgateway(self): + ''' + Gets the properties of the specified Application Gateway. 
+ + :return: deserialized Application Gateway instance state dictionary + ''' + self.log("Checking if the Application Gateway instance {0} is present".format(self.name)) + found = False + try: + response = self.network_client.application_gateways.get(resource_group_name=self.resource_group, + application_gateway_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("Application Gateway instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the Application Gateway instance.') + if found is True: + return response.as_dict() + + return False + + def start_applicationgateway(self): + self.log("Starting the Application Gateway instance {0}".format(self.name)) + try: + response = self.network_client.application_gateways.begin_start(resource_group_name=self.resource_group, + application_gateway_name=self.name) + if isinstance(response, LROPoller): + self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to start the Application Gateway instance.') + self.fail("Error starting the Application Gateway instance: {0}".format(str(e))) + + def stop_applicationgateway(self): + self.log("Stopping the Application Gateway instance {0}".format(self.name)) + try: + response = self.network_client.application_gateways.begin_stop(resource_group_name=self.resource_group, + application_gateway_name=self.name) + if isinstance(response, LROPoller): + self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to stop the Application Gateway instance.') + self.fail("Error stopping the Application Gateway instance: {0}".format(str(e))) + + def format_response(self, appgw_dict): + id = appgw_dict.get("id") + id_dict = parse_resource_id(id) + d = { + "id": id, + "name": appgw_dict.get("name"), + "resource_group": id_dict.get('resource_group', self.resource_group), + "location": appgw_dict.get("location"), + "operational_state": 
appgw_dict.get("operational_state"), + "provisioning_state": appgw_dict.get("provisioning_state"), + } + return d + + +def public_ip_id(subscription_id, resource_group_name, name): + """Generate the id for a frontend ip configuration""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/publicIPAddresses/{2}'.format( + subscription_id, + resource_group_name, + name + ) + + +def frontend_ip_configuration_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a frontend ip configuration""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendIPConfigurations/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def frontend_port_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a frontend port""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendPorts/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def redirect_configuration_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a redirect configuration""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/redirectConfigurations/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def ssl_certificate_id(subscription_id, resource_group_name, ssl_certificate_name, name): + """Generate the id for a frontend port""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/sslCertificates/{3}'.format( + subscription_id, + resource_group_name, + ssl_certificate_name, + name + ) + + +def backend_address_pool_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for an address pool""" + return 
'/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendAddressPools/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def probe_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a probe""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/probes/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def backend_http_settings_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a http settings""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendHttpSettingsCollection/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def http_listener_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a http listener""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/httpListeners/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def url_path_map_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a url path map""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/urlPathMaps/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def url_path_rule_id(subscription_id, resource_group_name, appgateway_name, url_path_map_name, name): + """Generate the id for a url path map""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/urlPathMaps/{3}/pathRules/{4}'.format( + subscription_id, + resource_group_name, + appgateway_name, + url_path_map_name, + name + ) + + +def subnet_id(subscription_id, resource_group_name, virtual_network_name, name): + """Generate 
the id for a subnet in a virtual network""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}'.format( + subscription_id, + resource_group_name, + virtual_network_name, + name + ) + + +def ip_configuration_id(subscription_id, resource_group_name, network_interface_name, name): + """Generate the id for a request routing rule in an application gateway""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/networkInterfaces/{2}/ipConfigurations/{3}'.format( + subscription_id, + resource_group_name, + network_interface_name, + name + ) + + +def request_routing_rule_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a request routing rule in an application gateway""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/requestRoutingRules/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def rewrite_rule_set_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a rewrite rule set in an application gateway""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/rewriteRuleSets/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def trusted_root_certificate_id(subscription_id, resource_group_name, appgateway_name, name): + """Generate the id for a trusted root certificate in an application gateway""" + return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/trustedRootCertificates/{3}'.format( + subscription_id, + resource_group_name, + appgateway_name, + name + ) + + +def compare_dicts(old_params, new_params, param_name): + """Compare two dictionaries using recursive_diff method and assuming that null values coming from yaml input + are acting like absent values""" + oldd = old_params.get(param_name, {}) + 
newd = new_params.get(param_name, {}) + + if oldd == {} and newd == {}: + return True + + diffs = recursive_diff(oldd, newd) + if diffs is None: + return True + else: + actual_diffs = diffs[1] + return all(value is None or not value for value in actual_diffs.values()) + + +def compare_arrays(old_params, new_params, param_name): + '''Compare two arrays, including any nested properties on elements.''' + old = old_params.get(param_name, []) + new = new_params.get(param_name, []) + + if old == [] and new == []: + return True + + oldd = array_to_dict(old) + newd = array_to_dict(new) + + newd = dict_merge(oldd, newd) + return newd == oldd + + +def array_to_dict(array): + '''Converts list object to dictionary object, including any nested properties on elements.''' + new = {} + for index, item in enumerate(array): + new[index] = deepcopy(item) + if isinstance(item, dict): + for nested in item: + if isinstance(item[nested], list): + new[index][nested] = array_to_dict(item[nested]) + return new + + +def main(): + """Main execution""" + AzureRMApplicationGateways() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway_info.py new file mode 100644 index 000000000..da5c5ecd2 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appgateway_info.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_appgateway_info +version_added: "1.10.0" +short_description: Retrieve Application Gateway instance facts +description: + - Get facts for a Application Gateway instance. 
+options: + name: + description: + - Only show results for a specific application gateway. + type: str + resource_group: + description: + - Limit results by resource group. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' + - name: Get facts for application gateway by name. + azure_rm_appgateway_info: + name: MyAppgw + resource_group: MyResourceGroup + + - name: Get facts for application gateways in resource group. + azure_rm_appgateway_info: + resource_group: MyResourceGroup + + - name: Get facts for all application gateways. + azure_rm_appgateway_info: +''' + +RETURN = ''' +gateways: + description: + - A list of dictionaries containing facts for an application gateway. + returned: always + type: list + elements: dict + contains: + id: + description: + - Application gateway resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationGateways/myAppGw + name: + description: + - Name of application gateway. + returned: always + type: str + sample: myAppGw + resource_group: + description: + - Name of resource group. + returned: always + type: str + sample: myResourceGroup + location: + description: + - Location of application gateway. + returned: always + type: str + sample: centralus + operational_state: + description: + - Operating state of application gateway. + returned: always + type: str + sample: Running + provisioning_state: + description: + - Provisioning state of application gateway. + returned: always + type: str + sample: Succeeded + ssl_policy: + description: + - SSL policy of the application gateway. + returned: always + type: complex + version_added: "1.11.0" + contains: + policy_type: + description: + - The type of SSL policy. + returned: always + type: str + sample: predefined + policy_name: + description: + - The name of the SSL policy. 
+ returned: always + type: str + sample: ssl_policy20170401_s +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +try: + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.tools import parse_resource_id +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMApplicationGatewayInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + + super(AzureRMApplicationGatewayInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results["gateways"] = self.get() + elif self.resource_group is not None: + self.results["gateways"] = self.list_by_rg() + else: + self.results["gateways"] = self.list_all() + + return self.results + + def get(self): + response = None + results = [] + try: + response = self.network_client.application_gateways.get(resource_group_name=self.resource_group, application_gateway_name=self.name) + except ResourceNotFoundError: + pass + + if response is not None: + results.append(self.format_response(response)) + + return results + + def list_by_rg(self): + response = None + results = [] + try: + response = self.network_client.application_gateways.list(resource_group_name=self.resource_group) + except Exception as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing application gateways in resource groups {0}: {1} - {2}".format(self.resource_group, request_id, str(exc))) + + for item in response: + results.append(self.format_response(item)) + + return 
results + + def list_all(self): + response = None + results = [] + try: + response = self.network_client.application_gateways.list_all() + except Exception as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing all application gateways: {0} - {1}".format(request_id, str(exc))) + + for item in response: + results.append(self.format_response(item)) + + return results + + def format_response(self, appgw): + d = appgw.as_dict() + id = d.get("id") + id_dict = parse_resource_id(id) + d = { + "id": id, + "name": d.get("name"), + "resource_group": id_dict.get('resource_group', self.resource_group), + "location": d.get("location"), + "operational_state": d.get("operational_state"), + "provisioning_state": d.get("provisioning_state"), + "ssl_policy": None if d.get("ssl_policy") is None else { + "policy_type": _camel_to_snake(d.get("ssl_policy").get("policy_type", None)), + "policy_name": self.ssl_policy_name(d.get("ssl_policy").get("policy_name", None)), + }, + } + return d + + def ssl_policy_name(self, policy_name): + if policy_name == "AppGwSslPolicy20150501": + return "ssl_policy20150501" + elif policy_name == "AppGwSslPolicy20170401": + return "ssl_policy20170401" + elif policy_name == "AppGwSslPolicy20170401S": + return "ssl_policy20170401_s" + return None + + +def main(): + AzureRMApplicationGatewayInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup.py new file mode 100644 index 000000000..d135ca193 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function 
+__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_applicationsecuritygroup +version_added: "0.1.2" +short_description: Manage Azure Application Security Group +description: + - Create, update and delete instance of Azure Application Security Group. + +options: + resource_group: + description: + - The name of the resource group. + required: True + name: + description: + - The name of the application security group. + required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + state: + description: + - Assert the state of the Application Security Group. + - Use C(present) to create or update an Application Security Group and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Create application security group + azure_rm_applicationsecuritygroup: + resource_group: myResourceGroup + name: mySecurityGroup + location: eastus + tags: + foo: bar +''' + +RETURN = ''' +id: + description: + - Resource id of the application security group. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/ + mySecurityGroup" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrestazure.azure_operation import AzureOperationPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, CreateOrUpdate, Delete = range(3) + + +class AzureRMApplicationSecurityGroup(AzureRMModuleBase): + """Configuration class for an Azure RM Application Security Group resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.location = None + self.name = None + self.tags = None + + self.state = None + + self.results = dict(changed=False) + + self.to_do = Actions.NoAction + + super(AzureRMApplicationSecurityGroup, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + + if not self.location: + self.location = resource_group.location + + old_response = self.get_applicationsecuritygroup() + + if not old_response: + self.log("Application Security Group instance doesn't exist") + if self.state == 'present': + self.to_do = Actions.CreateOrUpdate + else: + self.log("Old instance didn't exist") + else: + self.log("Application 
Security Group instance already exists") + if self.state == 'present': + if self.check_update(old_response): + self.to_do = Actions.CreateOrUpdate + + update_tags, self.tags = self.update_tags(old_response.get('tags', None)) + if update_tags: + self.to_do = Actions.CreateOrUpdate + + elif self.state == 'absent': + self.to_do = Actions.Delete + + if self.to_do == Actions.CreateOrUpdate: + self.log("Need to Create / Update the Application Security Group instance") + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_update_applicationsecuritygroup() + self.results['id'] = response['id'] + + elif self.to_do == Actions.Delete: + self.log("Delete Application Security Group instance") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_applicationsecuritygroup() + + return self.results + + def check_update(self, existing_asg): + if self.location and self.location.lower() != existing_asg['location'].lower(): + self.module.warn("location cannot be updated. Existing {0}, input {1}".format(existing_asg['location'], self.location)) + return False + + def create_update_applicationsecuritygroup(self): + ''' + Create or update Application Security Group. 
+ + :return: deserialized Application Security Group instance state dictionary + ''' + self.log("Creating / Updating the Application Security Group instance {0}".format(self.name)) + + param = dict(name=self.name, + tags=self.tags, + location=self.location) + try: + response = self.network_client.application_security_groups.begin_create_or_update(resource_group_name=self.resource_group, + application_security_group_name=self.name, + parameters=param) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error creating/updating Application Security Group instance.') + self.fail("Error creating/updating Application Security Group instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_applicationsecuritygroup(self): + ''' + Deletes specified Application Security Group instance. + + :return: True + ''' + self.log("Deleting the Application Security Group instance {0}".format(self.name)) + try: + response = self.network_client.application_security_groups.begin_delete(resource_group_name=self.resource_group, + application_security_group_name=self.name) + except Exception as e: + self.log('Error deleting the Application Security Group instance.') + self.fail("Error deleting the Application Security Group instance: {0}".format(str(e))) + + return True + + def get_applicationsecuritygroup(self): + ''' + Gets the properties of the specified Application Security Group. 
+ + :return: deserialized Application Security Group instance state dictionary + ''' + self.log("Checking if the Application Security Group instance {0} is present".format(self.name)) + found = False + try: + response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group, + application_security_group_name=self.name) + self.log("Response : {0}".format(response)) + self.log("Application Security Group instance : {0} found".format(response.name)) + return response.as_dict() + except ResourceNotFoundError as e: + self.log('Did not find the Application Security Group instance.') + return False + + +def main(): + """Main execution""" + AzureRMApplicationSecurityGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup_info.py new file mode 100644 index 000000000..738404b4e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_applicationsecuritygroup_info.py @@ -0,0 +1,226 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_applicationsecuritygroup_info +version_added: "0.1.2" +short_description: Get Azure Application Security Group facts +description: + - Get facts of Azure Application Security Group. + +options: + resource_group: + description: + - The name of the resource group. + name: + description: + - The name of the application security group. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: List application security groups in specific resource group + azure_rm_applicationsecuritygroup_info: + resource_group: myResourceGroup + + - name: List application security groups in specific subscription + azure_rm_applicationsecuritygroup_info: + + - name: Get application security group by name + azure_rm_applicationsecuritygroup_info: + resource_group: myResourceGroup + name: myApplicationSecurityGroup + tags: + - foo +''' + +RETURN = ''' +applicationsecuritygroups: + description: + - List of application security groups. + returned: always + type: complex + contains: + id: + description: Id of the application security group. + type: str + returned: always + sample: + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/MyAsg" + location: + description: + - Location of the application security group. + type: str + returned: always + sample: eastus + name: + description: + - Name of the resource. + type: str + returned: always + sample: myAsg + provisioning_state: + description: + - Provisioning state of application security group. 
+ type: str + returned: always + sample: Succeeded +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +def applicationsecuritygroup_to_dict(asg): + return dict( + id=asg.id, + location=asg.location, + name=asg.name, + tags=asg.tags, + provisioning_state=asg.provisioning_state + ) + + +class AzureRMApplicationSecurityGroupInfo(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ), + tags=dict(type='list', elements='str') + ) + + self.resource_group = None + self.name = None + self.tags = None + + self.results = dict(changed=False) + + super(AzureRMApplicationSecurityGroupInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + is_old_facts = self.module._name == 'azure_rm_applicationsecuritygroup_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_applicationsecuritygroup_facts' module has been renamed to 'azure_rm_applicationsecuritygroup_info'", + version=(2.9, )) + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + if self.name: + if self.resource_group: + self.results['applicationsecuritygroups'] = self.get() + else: + self.fail("resource_group is required when filtering by name") + elif self.resource_group: + self.results['applicationsecuritygroups'] = self.list_by_resource_group() + else: + self.results['applicationsecuritygroups'] = self.list_all() + + return self.results + + def get(self): + ''' + Gets the properties of the specified Application Security Group. 
+ + :return: deserialized Application Security Group instance state dictionary + ''' + self.log("Get the Application Security Group instance {0}".format(self.name)) + + results = [] + try: + response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group, + application_security_group_name=self.name) + self.log("Response : {0}".format(response)) + + if response and self.has_tags(response.tags, self.tags): + results.append(applicationsecuritygroup_to_dict(response)) + except ResourceNotFoundError as e: + self.fail('Did not find the Application Security Group instance.') + return results + + def list_by_resource_group(self): + ''' + Lists the properties of Application Security Groups in specific resource group. + + :return: deserialized Application Security Group instance state dictionary + ''' + self.log("Get the Application Security Groups in resource group {0}".format(self.resource_group)) + + results = [] + try: + response = list(self.network_client.application_security_groups.list(resource_group_name=self.resource_group)) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(applicationsecuritygroup_to_dict(item)) + except ResourceNotFoundError as e: + self.log('Did not find the Application Security Group instance.') + return results + + def list_all(self): + ''' + Lists the properties of Application Security Groups in specific subscription. 
+ + :return: deserialized Application Security Group instance state dictionary + ''' + self.log("Get the Application Security Groups in current subscription") + + results = [] + try: + response = list(self.network_client.application_security_groups.list_all()) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(applicationsecuritygroup_to_dict(item)) + except ResourceNotFoundError as e: + self.log('Did not find the Application Security Group instance.') + return results + + +def main(): + """Main execution""" + AzureRMApplicationSecurityGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appserviceplan.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appserviceplan.py new file mode 100644 index 000000000..2c0e883bf --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_appserviceplan.py @@ -0,0 +1,371 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_appserviceplan +version_added: "0.1.2" +short_description: Manage App Service Plan +description: + - Create, update and delete instance of App Service Plan. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + + name: + description: + - Unique name of the app service plan to create or update. + required: True + + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + + sku: + description: + - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc. 
  - name: Update the SKU of an existing windows app service plan
+ returned: always + type: dict + sample: { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan" + } +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.web.models import AppServicePlan, SkuDescription +except ImportError: + # This is handled in azure_rm_common + pass + + +def _normalize_sku(sku): + if sku is None: + return sku + + sku = sku.upper() + if sku == 'FREE': + return 'F1' + elif sku == 'SHARED': + return 'D1' + return sku + + +def get_sku_name(tier): + tier = tier.upper() + if tier == 'F1' or tier == "FREE": + return 'FREE' + elif tier == 'D1' or tier == "SHARED": + return 'SHARED' + elif tier in ['B1', 'B2', 'B3', 'BASIC']: + return 'BASIC' + elif tier in ['S1', 'S2', 'S3']: + return 'STANDARD' + elif tier in ['P1', 'P2', 'P3']: + return 'PREMIUM' + elif tier in ['P1V2', 'P2V2', 'P3V2']: + return 'PREMIUMV2' + else: + return None + + +def appserviceplan_to_dict(plan): + return dict( + id=plan.id, + name=plan.name, + kind=plan.kind, + location=plan.location, + reserved=plan.reserved, + is_linux=plan.reserved, + provisioning_state=plan.provisioning_state, + status=plan.status, + target_worker_count=plan.target_worker_count, + sku=dict( + name=plan.sku.name, + size=plan.sku.size, + tier=plan.sku.tier, + family=plan.sku.family, + capacity=plan.sku.capacity + ), + resource_group=plan.resource_group, + number_of_sites=plan.number_of_sites, + tags=plan.tags if plan.tags else None + ) + + +class AzureRMAppServicePlans(AzureRMModuleBase): + """Configuration class for an Azure RM App Service Plan resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), 
    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy every provided module option (plus tags) onto self.
        # NOTE(review): the truthiness test skips values like False/0/'' —
        # presumably intentional since all options default to None here; confirm.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if kwargs[key]:
                setattr(self, key, kwargs[key])

        old_response = None
        response = None
        to_be_updated = False

        # set location: default to the resource group's location when not given
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # get app service plan
        old_response = self.get_plan()

        # if not existing
        if not old_response:
            self.log("App Service plan doesn't exist")

            if self.state == "present":
                to_be_updated = True

                # sku is mandatory on first creation
                if not self.sku:
                    self.fail('Please specify sku in plan when creation')

        else:
            # existing app service plan, do update
            self.log("App Service Plan already exists")

            if self.state == 'present':
                self.log('Result: {0}'.format(old_response))

                update_tags, newtags = self.update_tags(old_response.get('tags', dict()))

                if update_tags:
                    to_be_updated = True
                    self.tags = newtags

                # check if sku changed (compare normalized codes, e.g. FREE -> F1)
                if self.sku and _normalize_sku(self.sku) != _normalize_sku(old_response['sku']['size']):
                    to_be_updated = True

                # check if number_of_workers changed (option arrives as a string)
                if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']:
                    to_be_updated = True

                # switching a plan between Windows and Linux is rejected outright
                if self.is_linux and self.is_linux != old_response['reserved']:
                    self.fail("Operation not allowed: cannot update reserved of app service plan.")

        if old_response:
            self.results['id'] = old_response['id']

        if to_be_updated:
            self.log('Need to Create/Update app service plan')
            self.results['changed'] = True

            # in check mode report the pending change without touching Azure
            if self.check_mode:
                return self.results

            response = self.create_or_update_plan()
            self.results['id'] = response['id']

        if self.state == 'absent' and old_response:
            self.log("Delete app service plan")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_plan()

            self.log('App service plan instance deleted')

        return self.results

    def get_plan(self):
        '''
        Gets app service plan
        :return: deserialized app service plan dictionary, or False when absent
        '''
        self.log("Get App Service Plan {0}".format(self.name))

        try:
            response = self.web_client.app_service_plans.get(resource_group_name=self.resource_group, name=self.name)
            if response:
                self.log("Response : {0}".format(response))
                self.log("App Service Plan : {0} found".format(response.name))

                return appserviceplan_to_dict(response)
        except ResourceNotFoundError:
            self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group))

        # falsy sentinel used by exec_module to mean "plan does not exist"
        return False

    def create_or_update_plan(self):
        '''
        Creates app service plan
        :return: deserialized app service plan dictionary
        '''
        self.log("Create App Service Plan {0}".format(self.name))

        try:
            # normalize sku (e.g. FREE -> F1) before deriving the tier name
            sku = _normalize_sku(self.sku)

            sku_def = SkuDescription(tier=get_sku_name(
                sku), name=sku, capacity=self.number_of_workers)
            plan_def = AppServicePlan(
                location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None)

            response = self.web_client.app_service_plans.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                name=self.name,
                                                                                app_service_plan=plan_def)

            # long-running operation: block until the poller completes
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)

            self.log("Response : {0}".format(response))

            return appserviceplan_to_dict(response)
        except Exception as ex:
            self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))

    def delete_plan(self):
        '''
        Deletes specified App service plan in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the App service plan {0}".format(self.name))
        try:
            self.web_client.app_service_plans.delete(resource_group_name=self.resource_group, name=self.name)
        except ResourceNotFoundError as e:
            self.log('Error attempting to delete App service plan.')
            self.fail("Error deleting the App service plan : {0}".format(str(e)))

        return True


def main():
    """Main execution"""
    AzureRMAppServicePlans()


if __name__ == '__main__':
    main()
  - name: Get facts for all app service plans in a resource group
class AzureRMAppServicePlanInfo(AzureRMModuleBase):
    """Facts module: collect app service plans by name, group or subscription."""

    def __init__(self):
        # Module options: optional plan name / resource group plus a tag filter.
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list', elements='str')
        )

        self.results = dict(changed=False)

        self.name = None
        self.resource_group = None
        self.tags = None
        self.info_level = None

        super(AzureRMAppServicePlanInfo, self).__init__(self.module_arg_spec,
                                                        supports_check_mode=True,
                                                        supports_tags=False,
                                                        facts_module=True)

    def exec_module(self, **kwargs):
        # Warn users still invoking the module under its legacy *_facts alias.
        if self.module._name == 'azure_rm_appserviceplan_facts':
            self.module.deprecate("The 'azure_rm_appserviceplan_facts' module has been renamed to 'azure_rm_appserviceplan_info'", version=(2.9, ))

        for option in self.module_arg_spec:
            setattr(self, option, kwargs[option])

        # Narrowest applicable scope wins: single plan, then group, then subscription.
        if self.name:
            plans = self.list_by_name()
        elif self.resource_group:
            plans = self.list_by_resource_group()
        else:
            plans = self.list_all()
        self.results['appserviceplans'] = plans

        return self.results

    def list_by_name(self):
        self.log('Get app service plan {0}'.format(self.name))
        plan = None
        try:
            plan = self.web_client.app_service_plans.get(resource_group_name=self.resource_group, name=self.name)
        except ResourceNotFoundError:
            plan = None

        # An absent plan, or one filtered out by tags, yields an empty list.
        if plan and self.has_tags(plan.tags, self.tags):
            return [self.construct_curated_plan(plan)]
        return []

    def list_by_resource_group(self):
        self.log('List app service plans in resource groups {0}'.format(self.resource_group))
        try:
            plans = list(self.web_client.app_service_plans.list_by_resource_group(resource_group_name=self.resource_group))
        except Exception as exc:
            self.fail("Error listing app service plan in resource groups {0} - {1}".format(self.resource_group, str(exc)))

        return [self.construct_curated_plan(plan) for plan in plans if self.has_tags(plan.tags, self.tags)]

    def list_all(self):
        self.log('List app service plans in current subscription')
        try:
            plans = list(self.web_client.app_service_plans.list())
        except Exception as exc:
            self.fail("Error listing app service plans: {0}".format(str(exc)))

        return [self.construct_curated_plan(plan) for plan in plans if self.has_tags(plan.tags, self.tags)]

    def construct_curated_plan(self, plan):
        # Serialize the SDK model, then expose only the fields this module documents.
        plan_facts = self.serialize_obj(plan, AZURE_OBJECT_CLASS)

        curated = dict(
            id=plan_facts['id'],
            name=plan_facts['name'],
            resource_group=plan_facts['properties']['resourceGroup'],
            location=plan_facts['location'],
            tags=plan_facts.get('tags', None),
            # 'reserved' is how the service marks a Linux plan.
            is_linux=bool(plan_facts['properties'].get('reserved', None)),
            kind=plan_facts['kind'],
            sku=plan_facts['sku']
        )
        return curated


def main():
    AzureRMAppServicePlanInfo()


if __name__ == '__main__':
    main()
      - State of the automation account. Use C(present) to create or update an automation account and C(absent) to delete an automation account.
class AzureRMAutomationAccount(AzureRMModuleBase):
    """Create, update tags on, or delete an Azure Automation account."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str')
        )

        self.results = dict(
            changed=False,
            id=None
        )

        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None

        super(AzureRMAutomationAccount, self).__init__(self.module_arg_spec, supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Converge the account to the requested 'state' and report changes."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        account = self.get_account()
        changed = False
        if self.state == 'present':
            if not account:
                # Creating: default the location to the resource group's location.
                if not self.location:
                    resource_group = self.get_resource_group(self.resource_group)
                    self.location = resource_group.location
                param = self.automation_models.AutomationAccountCreateOrUpdateParameters(
                    location=self.location,
                    sku=self.automation_models.Sku(name='Basic'),
                    tags=self.tags
                )
                changed = True
                if not self.check_mode:
                    account = self.create_or_update(param)
            elif self.tags:
                # Account exists: the only mutable property handled here is tags.
                update_tags, tags = self.update_tags(account.tags)
                if update_tags:
                    # FIX: 'changed = True' was assigned twice in this branch.
                    changed = True
                    param = self.automation_models.AutomationAccountUpdateParameters(
                        tags=tags
                    )
                    if not self.check_mode:
                        self.update_account_tags(param)
            if account:
                self.results['id'] = account.id
        elif account:
            # state == 'absent' and the account exists: delete it.
            changed = True
            if not self.check_mode:
                self.delete_account()
        self.results['changed'] = changed
        return self.results

    def get_account(self):
        """Return the automation account model, or None when it does not exist."""
        try:
            return self.automation_client.automation_account.get(self.resource_group, self.name)
        except ResourceNotFoundError:
            return None

    def create_or_update(self, param):
        """Create or update the automation account; fail the module on error."""
        try:
            return self.automation_client.automation_account.create_or_update(self.resource_group, self.name, param)
        except Exception as exc:
            # BUGFIX: Python 3 exceptions have no '.message' attribute; use str(exc).
            self.fail('Error when creating automation account {0}: {1}'.format(self.name, str(exc)))

    def update_account_tags(self, param):
        """Patch the automation account (tags only); fail the module on error."""
        try:
            return self.automation_client.automation_account.update(self.resource_group, self.name, param)
        except Exception as exc:
            self.fail('Error when updating automation account {0}: {1}'.format(self.name, str(exc)))

    def delete_account(self):
        """Delete the automation account; fail the module on error."""
        try:
            return self.automation_client.automation_account.delete(self.resource_group, self.name)
        except Exception as exc:
            self.fail('Error when deleting automation account {0}: {1}'.format(self.name, str(exc)))


def main():
    AzureRMAutomationAccount()


if __name__ == '__main__':
    main()
      - List statistics details for an automation account.
+ type: str + returned: always + sample: eastus + creation_time: + description: + - Resource creation date time. + type: str + returned: always + sample: "2019-04-26T02:55:16.500Z" + last_modified_time: + description: + - Resource last modified date time. + type: str + returned: always + sample: "2019-04-26T02:55:16.500Z" + state: + description: + - Resource state. + type: str + returned: always + sample: ok + keys: + description: + - Resource keys. + type: complex + returned: always + contains: + key_name: + description: + - Name of the key. + type: str + returned: always + sample: Primary + permissions: + description: + - Permission of the key. + type: str + returned: always + sample: Full + value: + description: + - Value of the key. + type: str + returned: always + sample: "MbepKTO6IyGwml0GaKBkKN" + statistics: + description: + - Resource statistics. + type: complex + returned: always + contains: + counter_property: + description: + - Property value of the statistic. + type: str + returned: always + sample: New + counter_value: + description: + - Value of the statistic. + type: int + returned: always + sample: 0 + end_time: + description: + - EndTime of the statistic. + type: str + returned: always + sample: "2019-04-26T06:29:43.587518Z" + id: + description: + - ID of the statistic. + type: str + returned: always + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups + /myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing/statistics/New" + start_time: + description: + - StartTime of the statistic. + type: str + returned: always + sample: "2019-04-26T06:29:43.587518Z" + usages: + description: + - Resource usages. + type: complex + returned: always + contains: + current_value: + description: + - Current usage. + type: float + returned: always + sample: 0.0 + limit: + description: + - Max limit, C(-1) for unlimited. + type: int + returned: always + sample: -1 + name: + description: + - Usage counter name. 
+ type: complex + returned: always + contains: + localized_value: + description: + - Localized name. + type: str + returned: always + sample: "SubscriptionUsage" + value: + description: + - Name value. + type: str + returned: always + sample: "SubscriptionUsage" + unit: + description: + - Usage unit name. + type: str + returned: always + sample: "Minute" + throttle_status: + description: + - Usage throttle status. + type: str + returned: always + sample: "NotThrottled" + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + pass + + +class AzureRMAutomationAccountInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ), + list_statistics=dict( + type='bool' + ), + list_usages=dict( + type='bool' + ), + list_keys=dict( + type='bool' + ) + ) + # store the results of the module operation + self.results = dict() + self.resource_group = None + self.name = None + self.tags = None + self.list_statistics = None + self.list_usages = None + self.list_keys = None + + super(AzureRMAutomationAccountInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_automationaccount_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_automationaccount_facts' module has been renamed to 'azure_rm_automationaccount_info'", version=(2.9, )) + + for key in list(self.module_arg_spec): + setattr(self, key, kwargs[key]) + + if self.resource_group and self.name: + accounts = [self.get()] + elif self.resource_group: + accounts = self.list_by_resource_group() + 
else: + accounts = self.list_all() + self.results['automation_accounts'] = [self.to_dict(x) for x in accounts if self.has_tags(x.tags, self.tags)] + return self.results + + def to_dict(self, account): + if not account: + return None + id_dict = parse_resource_id(account.id) + result = account.as_dict() + result['resource_group'] = id_dict['resource_group'] + if self.list_statistics: + result['statistics'] = self.get_statics(id_dict['resource_group'], account.name) + if self.list_usages: + result['usages'] = self.get_usages(id_dict['resource_group'], account.name) + if self.list_keys: + result['keys'] = self.list_account_keys(id_dict['resource_group'], account.name) + return result + + def get(self): + try: + return self.automation_client.automation_account.get(self.resource_group, self.name) + except ResourceNotFoundError as exc: + self.fail('Error when getting automation account {0}: {1}'.format(self.name, exc.message)) + + def list_by_resource_group(self): + result = [] + try: + resp = self.automation_client.automation_account.list_by_resource_group(self.resource_group) + while True: + result.append(resp.next()) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when listing automation account in resource group {0}: {1}'.format(self.resource_group, exc.message)) + return result + + def list_all(self): + result = [] + try: + resp = self.automation_client.automation_account.list() + while True: + result.append(resp.next()) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when listing automation account: {0}'.format(exc.message)) + return result + + def get_statics(self, resource_group, name): + result = [] + try: + resp = self.automation_client.statistics.list_by_automation_account(resource_group, name) + while True: + result.append(resp.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when getting statics for automation account {0}/{1}: {2}'.format(resource_group, 
short_description: Manage automation runbook
+ type: str + required: True + location: + description: + - The location of the automation runbook. + type: str + description: + description: + - Sets the description. + type: str + runbook_type: + description: + - Sets the type of the runbook. + type: str + choices: + - Script + - Graph + - PowerShellWorkflow + - PowerShell + - GraphPowerShellWorkflow + - GraphPowerShell + log_activity_trace: + description: + - Sets the option to log activity trace of the runbook. + type: int + log_progress: + description: + - Sets progress log option. + type: bool + log_verbose: + description: + - Sets verbose log option. + type: bool + publish: + description: + - Whether to publish the runbook. + type: bool + state: + description: + - State of the automation runbook. Use C(present) to create or update a automation runbook and use C(absent) to delete. + type: str + default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Fred Sun (@Fred-sun) + +''' + +EXAMPLES = ''' +- name: create automation runbook with default parameters + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "{{ account-name }}" + name: "{{ runbook-name }}" + runbook_type: "Script" + description: "Fred test" + +- name: create automation runbook with more parameters + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "{{ account-name }}" + name: "{{ runbook-name }}" + runbook_type: "Script" + description: "Fred test" + log_activity_trace: 3 + log_progress: True + log_verbose: False + tags: + key1: value1 + +- name: Publish automation runbook + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "{{ account-name }}" + name: "{{ runbook-name }}" + publish: True + +- name: Delete automation runbook + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + 
automation_account_name: "{{ account-name }}" + name: "{{ runbook-name }}" + state: absent +''' + +RETURN = ''' +state: + description: + - List of automation runbook dicts. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + type: str + returned: always + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups + /myResourceGroup/providers/Microsoft.Automation/automationAccounts/test/runbooks/runbook_name" + resource_group: + description: + - Resource group name. + type: str + returned: always + sample: myResourceGroup + name: + description: + - Resource name. + type: str + returned: always + sample: Testing + location: + description: + - Resource location. + type: str + returned: always + sample: eastus + creation_time: + description: + - The resource creation date time. + type: str + returned: always + sample: "2022-03-24T06:30:54.116666+00:00" + job_count: + description: + - The job count of the runbook. + type: int + returned: always + sample: 3 + last_modified_by: + description: + - The resource last modifier. + type: str + returned: always + sample: Fred-sun + last_modified_time: + description: + - The last person to update the resource. + type: str + returned: always + sample: "2022-03-25T06:30:54.116666+00:00" + log_activity_trace: + description: + - The option to log activity trace of the runbook. + type: int + returned: always + sample: 3 + log_progress: + description: + - Whether show progress log option. + type: bool + returned: always + sample: True + log_verbose: + description: + - Whether show verbose log option. + type: bool + returned: always + sample: True + output_types: + description: + - The runbook output type. + type: list + returned: always + sample: [] + runbook_content_link: + description: + - The publish runbook content link. + type: str + returned: always + sample: null + state: + description: + - The resource state. 
+ type: str + returned: always + sample: Published + tags: + description: + - The resource tags. + type: dict + returned: always + sample: { 'key1': 'value1' } + type: + description: + - The resource automation runbook type. + type: str + returned: always + sample: "Microsoft.Automation/AutomationAccounts/Runbooks" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + pass + + +class AzureRMAutomationRunbook(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + automation_account_name=dict( + type='str', + required=True + ), + runbook_type=dict( + type='str', + choices=['Script', 'Graph', 'PowerShellWorkflow', 'PowerShell', 'GraphPowerShellWorkflow', 'GraphPowerShell'] + ), + description=dict( + type='str' + ), + location=dict( + type='str' + ), + log_activity_trace=dict( + type='int' + ), + log_progress=dict( + type='bool' + ), + publish=dict( + type='bool' + ), + log_verbose=dict( + type='bool' + ), + state=dict( + type='str', + choices=['present', 'absent'], + default='present' + ) + ) + # store the results of the module operation + self.results = dict() + self.resource_group = None + self.name = None + self.automation_account_name = None + self.runbook_type = None + self.description = None + self.log_activity_trace = None + self.log_progress = None + self.log_verbose = None + self.location = None + self.publish = None + + super(AzureRMAutomationRunbook, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec) + ['tags']: + setattr(self, key, kwargs[key]) + + if not self.location: + resource_group = self.get_resource_group(self.resource_group) + 
self.location = resource_group.location + + runbook = self.get() + changed = False + + if self.state == 'present': + if runbook: + update_parameter = dict() + if self.tags is not None: + update_tags, tags = self.update_tags(runbook['tags']) + if update_tags: + changed = True + update_parameter['tags'] = tags + if self.description is not None and self.description != runbook['description']: + changed = True + update_parameter['description'] = self.description + if self.log_activity_trace is not None and self.log_activity_trace != runbook['log_activity_trace']: + changed = True + update_parameter['log_activity_trace'] = self.log_activity_trace + if self.log_progress is not None and self.log_progress != runbook['log_progress']: + changed = True + update_parameter['log_progress'] = self.log_progress + if self.log_verbose is not None and self.log_verbose != runbook['log_verbose']: + changed = True + update_parameter['log_verbose'] = self.log_verbose + if self.location is not None and self.location != runbook['location']: + changed = True + self.fail("Parameter error (location): The parameters {0} cannot be update".format(self.location)) + if self.runbook_type is not None and self.runbook_type != runbook['runbook_type']: + changed = True + self.fail("Parameter error (runbook_type): The parameters {0} cannot be update".format(self.runbook_type)) + + if changed: + if not self.check_mode: + if update_parameter.get('log_activity_trace'): + runbook['log_activity_trace'] = update_parameter.get('log_activity_trace') + + paramters = self.automation_models.RunbookCreateOrUpdateParameters( + location=runbook['location'] if update_parameter.get('location') else update_parameter.get('location'), + log_verbose=runbook['log_verbose'] if update_parameter.get('log_verbose') else update_parameter.get('log_verbose'), + runbook_type=runbook['runbook_type'] if update_parameter.get('runbook_type') else update_parameter.get('runbook_type'), + description=runbook['description'] if 
update_parameter.get('description') else update_parameter.get('description'), + log_activity_trace=runbook['log_activity_trace'], + tags=runbook['tags'] if update_parameter.get('tags') else update_parameter.get('tags'), + log_progress=runbook['log_progress'] if update_parameter.get('log_progress') else update_parameter.get('log_progress') + ) + + runbook = self.update_runbook(update_parameter) + + else: + paramters = self.automation_models.RunbookCreateOrUpdateParameters( + location=self.location, + log_verbose=self.log_verbose, + runbook_type=self.runbook_type, + description=self.description, + log_activity_trace=self.log_activity_trace, + tags=self.tags, + log_progress=self.log_progress + ) + changed = True + if not self.check_mode: + runbook = self.create_or_update(paramters) + + if not self.check_mode: + if self.publish and runbook['state'] != 'Published': + changed = True + self.publish_runbook() + else: + changed = True + if not self.check_mode: + runbook = self.delete_automation_runbook() + + self.results['changed'] = changed + self.results['state'] = runbook + return self.results + + def get(self): + try: + response = self.automation_client.runbook.get(self.resource_group, self.automation_account_name, self.name) + return self.to_dict(response) + except ResourceNotFoundError: + pass + + def publish_runbook(self): + response = None + try: + response = self.automation_client.runbook.begin_publish(self.resource_group, self.automation_account_name, self.name) + except Exception as exc: + self.fail('Error when updating automation account {0}: {1}'.format(self.name, exc.message)) + + def update_runbook(self, parameters): + try: + response = self.automation_client.runbook.update(self.resource_group, self.automation_account_name, self.name, parameters) + return self.to_dict(response) + except Exception as exc: + self.fail('Error when updating automation account {0}: {1}'.format(self.name, exc.message)) + + def create_or_update(self, parameters): + try: + response = 
self.automation_client.runbook.create_or_update(self.resource_group, self.automation_account_name, self.name, parameters) + return self.to_dict(response) + except Exception as exc: + self.fail('Error when creating automation account {0}: {1}'.format(self.name, exc.message)) + + def delete_automation_runbook(self): + try: + return self.automation_client.runbook.delete(self.resource_group, self.automation_account_name, self.name) + except Exception as exc: + self.fail('Error when deleting automation account {0}: {1}'.format(self.name, exc.message)) + + def to_dict(self, runbook): + if not runbook: + return None + runbook_dict = dict( + id=runbook.id, + type=runbook.type, + name=runbook.name, + tags=runbook.tags, + location=runbook.location, + runbook_type=runbook.runbook_type, + runbook_content_link=runbook.publish_content_link, + state=runbook.state, + log_verbose=runbook.log_verbose, + log_progress=runbook.log_progress, + log_activity_trace=runbook.log_activity_trace, + job_count=runbook.job_count, + output_types=runbook.output_types, + last_modified_by=runbook.last_modified_by, + last_modified_time=runbook.last_modified_time, + creation_time=runbook.creation_time, + description=runbook.description + ) + return runbook_dict + + +def main(): + AzureRMAutomationRunbook() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationrunbook_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationrunbook_info.py new file mode 100644 index 000000000..7f3385362 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_automationrunbook_info.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Fred-sun, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: 
azure_rm_automationrunbook_info +version_added: '1.12.0' +short_description: Get Azure automation runbook facts +description: + - Get facts of automation runbook. + +options: + resource_group: + description: + - The name of the resource group. + type: str + required: True + automation_account_name: + description: + - The name of the automation account. + type: str + required: True + name: + description: + - The name of the automation runbook. + type: str + show_content: + description: + - Wether retrieve the content of runbook identified by runbook name. + type: bool + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Fred Sun (@Fred-sun) + +''' + +EXAMPLES = ''' +- name: Get details of an automation account + azure_rm_automationrunbook_info: + resource_group: "{{ resource_group }}" + automation_account_name: "{{ account-name }}" + name: "{{ runbook-name }}" + +- name: List automation runbook in the account + azure_rm_automationrunbook_info: + resource_group: "{{ resource_group }}" + automation_account_name: "{{ account-name }}" + +- name: Get details of an automation account + azure_rm_automationrunbook_info: + resource_group: "{{ resource_group }}" + automation_account_name: "{{ account-name }}" + name: "{{ runbook-name }}" + show_content: True + +''' + +RETURN = ''' +automation_runbook: + description: + - List of automation runbook dicts. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + type: str + returned: always + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups + /myResourceGroup/providers/Microsoft.Automation/automationAccounts/test/runbooks/runbook_name" + resource_group: + description: + - Resource group name. + type: str + returned: always + sample: myResourceGroup + name: + description: + - Resource name. 
+ type: str + returned: always + sample: Testing + location: + description: + - Resource location. + type: str + returned: always + sample: eastus + creation_time: + description: + - The resource creation date time. + type: str + returned: always + sample: "2022-03-24T06:30:54.116666+00:00" + job_count: + description: + - The job count of the runbook. + type: int + returned: always + sample: 3 + last_modified_by: + description: + - The resource last modifier. + type: str + returned: always + sample: Fred-sun + last_modified_time: + description: + - The last person to update the resource. + type: str + returned: always + sample: "2022-03-25T06:30:54.116666+00:00" + log_activity_trace: + description: + - The option to log activity trace of the runbook. + type: int + returned: always + sample: 3 + log_progress: + description: + - Whether show progress log option. + type: bool + returned: always + sample: True + log_verbose: + description: + - Whether show verbose log option. + type: bool + returned: always + sample: True + output_types: + description: + - The runbook output type. + type: list + returned: always + sample: [] + runbook_content_link: + description: + - The publish runbook content link. + type: str + returned: always + sample: null + state: + description: + - The resource state. + type: str + returned: always + sample: Published + tags: + description: + - The resource tags. + type: list + returned: always + sample: { 'key1': 'value1' } + type: + description: + - The resource automation runbook type. 
+ type: str + returned: always + sample: "Microsoft.Automation/AutomationAccounts/Runbooks" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + pass + + +class AzureRMAutomationRunbookInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + automation_account_name=dict( + type='str', + required=True + ), + show_content=dict( + type='bool' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict() + self.resource_group = None + self.name = None + self.tags = None + self.automation_account_name = None + self.show_content = None + + super(AzureRMAutomationRunbookInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec) + ['tags']: + setattr(self, key, kwargs[key]) + + if self.name and self.show_content: + runbooks = [self.get_content()] + elif self.name: + runbooks = [self.get()] + else: + runbooks = self.list_by_automaiton_account() + self.results['automation_runbook'] = [self.to_dict(x) for x in runbooks if x and self.has_tags(x.tags, self.tags)] + return self.results + + def get_content(self): + try: + return self.automation_client.runbook.get(self.resource_group, self.automation_account_name, self.name) + except ResourceNotFoundError as exc: + pass + + def get(self): + try: + return self.automation_client.runbook.get(self.resource_group, self.automation_account_name, self.name) + except ResourceNotFoundError as exc: + pass + + def list_by_automaiton_account(self): + result = [] + try: + resp = self.automation_client.runbook.list_by_automation_account(self.resource_group, 
self.automation_account_name) + while True: + result.append(resp.next()) + except StopIteration: + pass + except Exception as exc: + pass + return result + + def to_dict(self, runbook): + if not runbook: + return None + runbook_dict = dict( + id=runbook.id, + type=runbook.type, + name=runbook.name, + tags=runbook.tags, + location=runbook.location, + runbook_type=runbook.runbook_type, + runbook_content_link=runbook.publish_content_link, + state=runbook.state, + log_verbose=runbook.log_verbose, + log_progress=runbook.log_progress, + log_activity_trace=runbook.log_activity_trace, + job_count=runbook.job_count, + output_types=runbook.output_types, + last_modified_by=runbook.last_modified_by, + last_modified_time=runbook.last_modified_time, + creation_time=runbook.creation_time, + description=runbook.description + ) + + return runbook_dict + + +def main(): + AzureRMAutomationRunbookInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale.py new file mode 100644 index 000000000..adcb6dfb9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale.py @@ -0,0 +1,644 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_autoscale +version_added: "0.1.2" +short_description: Manage Azure autoscale setting +description: + - Create, delete an autoscale setting. +options: + target: + description: + - The identifier of the resource to apply autoscale setting. + - It could be the resource id string. + - It also could be a dict contains the C(name), C(subscription_id), C(namespace), C(types), C(resource_group) of the resource. 
+ resource_group: + required: true + description: + - Resource group of the resource. + enabled: + type: bool + description: + - Specifies whether automatic scaling is enabled for the resource. + default: true + profiles: + description: + - The collection of automatic scaling profiles that specify different scaling parameters for different time periods. + - A maximum of 20 profiles can be specified. + suboptions: + name: + required: true + description: + - The name of the profile. + count: + required: true + description: + - The number of instances that will be set if metrics are not available for evaluation. + - The default is only used if the current instance count is lower than the default. + min_count: + description: + - The minimum number of instances for the resource. + max_count: + description: + - The maximum number of instances for the resource. + - The actual maximum number of instances is limited by the cores that are available in the subscription. + recurrence_frequency: + default: None + description: + - How often the schedule profile should take effect. + - If this value is C(Week), meaning each week will have the same set of profiles. + - This element is not used if the FixedDate element is used. + choices: + - None + - Second + - Minute + - Hour + - Day + - Week + - Month + - Year + recurrence_timezone: + description: + - The timezone of repeating times at which this profile begins. + - This element is not used if the FixedDate element is used. + recurrence_days: + description: + - The days of repeating times at which this profile begins. + - This element is not used if the FixedDate element is used. + recurrence_hours: + description: + - The hours of repeating times at which this profile begins. + - This element is not used if the FixedDate element is used. + recurrence_mins: + description: + - The mins of repeating times at which this profile begins. + - This element is not used if the FixedDate element is used. 
+ fixed_date_timezone: + description: + - The specific date-time timezone for the profile. + - This element is not used if the Recurrence element is used. + fixed_date_start: + description: + - The specific date-time start for the profile. + - This element is not used if the Recurrence element is used. + fixed_date_end: + description: + - The specific date-time end for the profile. + - This element is not used if the Recurrence element is used. + rules: + description: + - The collection of rules that provide the triggers and parameters for the scaling action. + - A maximum of 10 rules can be specified. + suboptions: + time_aggregation: + default: Average + description: + - How the data that is collected should be combined over time. + choices: + - Average + - Minimum + - Maximum + - Total + - Count + time_window: + required: true + description: + - The range of time(minutes) in which instance data is collected. + - This value must be greater than the delay in metric collection, which can vary from resource-to-resource. + - Must be between 5 ~ 720. + direction: + description: + - Whether the scaling action increases or decreases the number of instances. + choices: + - Increase + - Decrease + metric_name: + required: true + description: + - The name of the metric that defines what the rule monitors. + metric_resource_uri: + description: + - The resource identifier of the resource the rule monitors. + value: + description: + - The number of instances that are involved in the scaling action. + - This value must be 1 or greater. + operator: + default: GreaterThan + description: + - The operator that is used to compare the metric data and the threshold. + choices: + - Equals + - NotEquals + - GreaterThan + - GreaterThanOrEqual + - LessThan + - LessThanOrEqual + cooldown: + description: + - The amount of time (minutes) to wait since the last scaling action before this action occurs. + - It must be between 1 ~ 10080. 
+ time_grain: + required: true + description: + - The granularity(minutes) of metrics the rule monitors. + - Must be one of the predefined values returned from metric definitions for the metric. + - Must be between 1 ~ 720. + statistic: + default: Average + description: + - How the metrics from multiple instances are combined. + choices: + - Average + - Min + - Max + - Sum + threshold: + default: 70 + description: + - The threshold of the metric that triggers the scale action. + type: + description: + - The type of action that should occur when the scale rule fires. + choices: + - PercentChangeCount + - ExactCount + - ChangeCount + notifications: + description: + - The collection of notifications. + suboptions: + custom_emails: + description: + - The custom e-mails list. This value can be null or empty, in which case this attribute will be ignored. + send_to_subscription_administrator: + type: bool + default: False + description: + - A value indicating whether to send email to subscription administrator. + webhooks: + description: + - The list of webhook notifications service uri. + send_to_subscription_co_administrators: + type: bool + default: False + description: + - A value indicating whether to send email to subscription co-administrators. + state: + default: present + description: + - Assert the state of the virtual network. Use C(present) to create or update and C(absent) to delete. + choices: + - present + - absent + location: + description: + - location of the resource. + name: + required: true + description: + - name of the resource. 
+ + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create an auto scale + azure_rm_autoscale: + target: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/myVmss" + enabled: true + profiles: + - count: '1' + recurrence_days: + - Monday + name: Auto created scale condition + recurrence_timezone: China Standard Time + recurrence_mins: + - '0' + min_count: '1' + max_count: '1' + recurrence_frequency: Week + recurrence_hours: + - '18' + name: scale + resource_group: myResourceGroup + +- name: Create an auto scale with complicated profile + azure_rm_autoscale: + target: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets + /myVmss" + enabled: true + profiles: + - count: '1' + recurrence_days: + - Monday + name: Auto created scale condition 0 + rules: + - time_aggregation: Average + time_window: 10 + direction: Increase + metric_name: Percentage CPU + metric_resource_uri: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtua + lMachineScaleSets/vmss" + value: '1' + threshold: 70 + cooldown: 5 + time_grain: 1 + statistic: Average + operator: GreaterThan + type: ChangeCount + max_count: '1' + recurrence_mins: + - '0' + min_count: '1' + recurrence_timezone: China Standard Time + recurrence_frequency: Week + recurrence_hours: + - '6' + notifications: + - email_admin: True + email_co_admin: False + custom_emails: + - yuwzho@microsoft.com + name: scale + resource_group: myResourceGroup + +- name: Delete an Azure Auto Scale Setting + azure_rm_autoscale: + state: absent + resource_group: myResourceGroup + name: scale +''' + +RETURN = ''' +state: + description: Current state of the resource. 
+ returned: always + type: dict + sample: { + "changed": false, + "enabled": true, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale", + "location": "eastus", + "name": "scale", + "notifications": [ + { + "custom_emails": [ + "yuwzho@microsoft.com" + ], + "send_to_subscription_administrator": true, + "send_to_subscription_co_administrators": false, + "webhooks": [] + } + ], + "profiles": [ + { + "count": "1", + "max_count": "1", + "min_count": "1", + "name": "Auto created scale condition 0", + "recurrence_days": [ + "Monday" + ], + "recurrence_frequency": "Week", + "recurrence_hours": [ + "6" + ], + "recurrence_mins": [ + "0" + ], + "recurrence_timezone": "China Standard Time", + "rules": [ + { + "cooldown": 5.0, + "direction": "Increase", + "metric_name": "Percentage CPU", + "metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof + t.Compute/virtualMachineScaleSets/MyVmss", + "operator": "GreaterThan", + "statistic": "Average", + "threshold": 70.0, + "time_aggregation": "Average", + "time_grain": 1.0, + "time_window": 10.0, + "type": "ChangeCount", + "value": "1" + } + ] + } + ], + "target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale + Sets/myVmss" + } +''' # NOQA + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils._text import to_native +from datetime import timedelta + +try: + from azure.mgmt.monitor.models import WebhookNotification, EmailNotification, AutoscaleNotification, RecurrentSchedule, MetricTrigger, \ + ScaleAction, AutoscaleSettingResource, AutoscaleProfile, ScaleCapacity, TimeWindow, Recurrence, ScaleRule +except ImportError: + # This is handled in azure_rm_common + pass + + +# duplicated in 
azure_rm_autoscale_facts +def timedelta_to_minutes(time): + if not time: + return 0 + return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0 + + +def get_enum_value(item): + if hasattr(item, 'value'): + return to_native(item.value) + return to_native(item) + + +def auto_scale_to_dict(instance): + if not instance: + return dict() + return dict( + id=to_native(instance.id or ''), + name=to_native(instance.name), + location=to_native(instance.location), + profiles=[profile_to_dict(p) for p in instance.profiles or []], + notifications=[notification_to_dict(n) for n in instance.notifications or []], + enabled=instance.enabled, + target=to_native(instance.target_resource_uri), + tags=instance.tags + ) + + +def rule_to_dict(rule): + if not rule: + return dict() + result = dict(metric_name=to_native(rule.metric_trigger.metric_name), + metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri), + time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain), + statistic=get_enum_value(rule.metric_trigger.statistic), + time_window=timedelta_to_minutes(rule.metric_trigger.time_window), + time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation), + operator=get_enum_value(rule.metric_trigger.operator), + threshold=float(rule.metric_trigger.threshold)) + if rule.scale_action and to_native(rule.scale_action.direction) != 'None': + result['direction'] = get_enum_value(rule.scale_action.direction) + result['type'] = get_enum_value(rule.scale_action.type) + result['value'] = to_native(rule.scale_action.value) + result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown) + return result + + +def profile_to_dict(profile): + if not profile: + return dict() + result = dict(name=to_native(profile.name), + count=to_native(profile.capacity.default), + max_count=to_native(profile.capacity.maximum), + min_count=to_native(profile.capacity.minimum)) + + if profile.rules: + result['rules'] = [rule_to_dict(r) for r in profile.rules] + 
if profile.fixed_date: + result['fixed_date_timezone'] = profile.fixed_date.time_zone + result['fixed_date_start'] = profile.fixed_date.start + result['fixed_date_end'] = profile.fixed_date.end + if profile.recurrence: + if get_enum_value(profile.recurrence.frequency) != 'None': + result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency) + if profile.recurrence.schedule: + result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone)) + result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days] + result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours] + result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes] + return result + + +def notification_to_dict(notification): + if not notification: + return dict() + return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False, + send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False, + custom_emails=[to_native(e) for e in notification.email.custom_emails or []], + webhooks=[to_native(w.service_uri) for w in notification.webhooks or []]) + + +rule_spec = dict( + metric_name=dict(type='str', required=True), + metric_resource_uri=dict(type='str'), + time_grain=dict(type='float', required=True), + statistic=dict(type='str', choices=['Average', 'Min', 'Max', 'Sum'], default='Average'), + time_window=dict(type='float', required=True), + time_aggregation=dict(type='str', choices=['Average', 'Minimum', 'Maximum', 'Total', 'Count'], default='Average'), + operator=dict(type='str', + choices=['Equals', 'NotEquals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'], + default='GreaterThan'), + threshold=dict(type='float', default=70), + direction=dict(type='str', choices=['Increase', 'Decrease']), + type=dict(type='str', 
choices=['PercentChangeCount', 'ExactCount', 'ChangeCount']), + value=dict(type='str'), + cooldown=dict(type='float') +) + + +profile_spec = dict( + name=dict(type='str', required=True), + count=dict(type='str', required=True), + max_count=dict(type='str'), + min_count=dict(type='str'), + rules=dict(type='list', elements='dict', options=rule_spec), + fixed_date_timezone=dict(type='str'), + fixed_date_start=dict(type='str'), + fixed_date_end=dict(type='str'), + recurrence_frequency=dict(type='str', choices=['None', 'Second', 'Minute', 'Hour', 'Day', 'Week', 'Month', 'Year'], default='None'), + recurrence_timezone=dict(type='str'), + recurrence_days=dict(type='list', elements='str'), + recurrence_hours=dict(type='list', elements='str'), + recurrence_mins=dict(type='list', elements='str') +) + + +notification_spec = dict( + send_to_subscription_administrator=dict(type='bool', aliases=['email_admin'], default=False), + send_to_subscription_co_administrators=dict(type='bool', aliases=['email_co_admin'], default=False), + custom_emails=dict(type='list', elements='str'), + webhooks=dict(type='list', elements='str') +) + + +class AzureRMAutoScale(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + target=dict(type='raw'), + profiles=dict(type='list', elements='dict', options=profile_spec), + enabled=dict(type='bool', default=True), + notifications=dict(type='list', elements='dict', options=notification_spec) + ) + + self.results = dict( + changed=False + ) + + required_if = [ + ('state', 'present', ['target', 'profiles']) + ] + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.tags = None + self.target = None + self.profiles = None + self.notifications = None + self.enabled = None + + super(AzureRMAutoScale, 
self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + results = None + changed = False + + self.log('Fetching auto scale settings {0}'.format(self.name)) + results = self.get_auto_scale() + if results and self.state == 'absent': + # delete + changed = True + if not self.check_mode: + self.delete_auto_scale() + elif self.state == 'present': + + if not self.location: + # Set default location + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + + resource_id = self.target + if isinstance(self.target, dict): + resource_id = format_resource_id(val=self.target['name'], + subscription_id=self.target.get('subscription_id') or self.subscription_id, + namespace=self.target['namespace'], + types=self.target['types'], + resource_group=self.target.get('resource_group') or self.resource_group) + self.target = resource_id + resource_name = self.name + + def create_rule_instance(params): + rule = params.copy() + rule['metric_resource_uri'] = rule.get('metric_resource_uri', self.target) + rule['time_grain'] = timedelta(minutes=rule.get('time_grain', 0)) + rule['time_window'] = timedelta(minutes=rule.get('time_window', 0)) + rule['cooldown'] = timedelta(minutes=rule.get('cooldown', 0)) + return ScaleRule(metric_trigger=MetricTrigger(**rule), scale_action=ScaleAction(**rule)) + + profiles = [AutoscaleProfile(name=p.get('name'), + capacity=ScaleCapacity(minimum=p.get('min_count'), + maximum=p.get('max_count'), + default=p.get('count')), + rules=[create_rule_instance(r) for r in p.get('rules') or []], + fixed_date=TimeWindow(time_zone=p.get('fixed_date_timezone'), + start=p.get('fixed_date_start'), + end=p.get('fixed_date_end')) if p.get('fixed_date_timezone') else None, + recurrence=Recurrence(frequency=p.get('recurrence_frequency'), + 
schedule=(RecurrentSchedule(time_zone=p.get('recurrence_timezone'), + days=p.get('recurrence_days'), + hours=p.get('recurrence_hours'), + minutes=p.get('recurrence_mins')))) + if p.get('recurrence_frequency') and p['recurrence_frequency'] != 'None' else None) + for p in self.profiles or []] + + notifications = [AutoscaleNotification(email=EmailNotification(**n), + webhooks=[WebhookNotification(service_uri=w) for w in n.get('webhooks') or []]) + for n in self.notifications or []] + + if not results: + # create new + changed = True + else: + # check changed + resource_name = results.name_properties_name or self.name + update_tags, tags = self.update_tags(results.tags) + if update_tags: + changed = True + self.tags = tags + if self.target != results.target_resource_uri: + changed = True + if self.enabled != results.enabled: + changed = True + profile_result_set = set([str(profile_to_dict(p)) for p in results.profiles or []]) + if profile_result_set != set([str(profile_to_dict(p)) for p in profiles]): + changed = True + notification_result_set = set([str(notification_to_dict(n)) for n in results.notifications or []]) + if notification_result_set != set([str(notification_to_dict(n)) for n in notifications]): + changed = True + if changed: + # construct the instance will be send to create_or_update api + results = AutoscaleSettingResource(location=self.location, + tags=self.tags, + profiles=profiles, + notifications=notifications, + enabled=self.enabled, + name_properties_name=resource_name, + target_resource_uri=self.target) + if not self.check_mode: + results = self.create_or_update_auto_scale(results) + # results should be the dict of the instance + self.results = auto_scale_to_dict(results) + self.results['changed'] = changed + return self.results + + def get_auto_scale(self): + try: + return self.monitor_autoscale_settings_client.autoscale_settings.get(self.resource_group, self.name) + except Exception as exc: + self.log('Error: failed to get auto scale settings {0} 
- {1}'.format(self.name, str(exc))) + return None + + def create_or_update_auto_scale(self, param): + try: + return self.monitor_autoscale_settings_client.autoscale_settings.create_or_update(self.resource_group, self.name, param) + except Exception as exc: + self.fail("Error creating auto scale settings {0} - {1}".format(self.name, str(exc))) + + def delete_auto_scale(self): + self.log('Deleting auto scale settings {0}'.format(self.name)) + try: + return self.monitor_autoscale_settings_client.autoscale_settings.delete(self.resource_group, self.name) + except Exception as exc: + self.fail("Error deleting auto scale settings {0} - {1}".format(self.name, str(exc))) + + +def main(): + AzureRMAutoScale() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale_info.py new file mode 100644 index 000000000..49411ad48 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_autoscale_info.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_autoscale_info +version_added: "0.1.2" +short_description: Get Azure Auto Scale Setting facts +description: + - Get facts of Auto Scale Setting. + +options: + resource_group: + description: + - The name of the resource group. + required: True + name: + description: + - The name of the Auto Scale Setting. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' + - name: Get instance of Auto Scale Setting + azure_rm_autoscale_info: + resource_group: myResourceGroup + name: auto_scale_name + + - name: List instances of Auto Scale Setting + azure_rm_autoscale_info: + resource_group: myResourceGroup + tags: + - key + - key:value +''' + +RETURN = ''' +autoscales: + description: List of Azure Scale Settings dicts. + returned: always + type: list + sample: [{ + "enabled": true, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale", + "location": "eastus", + "name": "scale", + "notifications": [ + { + "custom_emails": [ + "yuwzho@microsoft.com" + ], + "send_to_subscription_administrator": true, + "send_to_subscription_co_administrators": false, + "webhooks": [] + } + ], + "profiles": [ + { + "count": "1", + "max_count": "1", + "min_count": "1", + "name": "Auto created scale condition 0", + "recurrence_days": [ + "Monday" + ], + "recurrence_frequency": "Week", + "recurrence_hours": [ + "6" + ], + "recurrence_mins": [ + "0" + ], + "recurrence_timezone": "China Standard Time", + "rules": [ + { + "cooldown": 5.0, + "direction": "Increase", + "metric_name": "Percentage CPU", + "metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof + t.Compute/virtualMachineScaleSets/myVmss", + "operator": "GreaterThan", + "statistic": "Average", + "threshold": 70.0, + "time_aggregation": "Average", + "time_grain": 1.0, + "time_window": 10.0, + "type": "ChangeCount", + "value": "1" + } + ] + } + ], + "target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale + Sets/myVmss" + }] + +''' + +from 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native

try:
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass


# duplicated in azure_rm_autoscale
def timedelta_to_minutes(time):
    """Convert a datetime.timedelta to minutes as a float; None/zero becomes 0."""
    if not time:
        return 0
    return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0


def get_enum_value(item):
    """Return the underlying value of an enum member -- or the item itself -- as text."""
    if hasattr(item, 'value'):
        return to_native(item.value)
    return to_native(item)


def auto_scale_to_dict(instance):
    """Serialize an autoscale setting resource into a plain dict (empty dict for None)."""
    if not instance:
        return dict()
    return dict(
        id=to_native(instance.id or ''),
        name=to_native(instance.name),
        location=to_native(instance.location),
        profiles=[profile_to_dict(p) for p in instance.profiles or []],
        notifications=[notification_to_dict(n) for n in instance.notifications or []],
        enabled=instance.enabled,
        target=to_native(instance.target_resource_uri),
        tags=instance.tags
    )


def rule_to_dict(rule):
    """Serialize a ScaleRule; scale-action fields are emitted only for real actions."""
    if not rule:
        return dict()
    result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
                  metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
                  time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
                  statistic=get_enum_value(rule.metric_trigger.statistic),
                  time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
                  time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
                  operator=get_enum_value(rule.metric_trigger.operator),
                  threshold=float(rule.metric_trigger.threshold))
    if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
        result['direction'] = get_enum_value(rule.scale_action.direction)
        result['type'] = get_enum_value(rule.scale_action.type)
        result['value'] = to_native(rule.scale_action.value)
        result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
    return result


def profile_to_dict(profile):
    """Serialize an AutoscaleProfile, including optional fixed-date/recurrence parts."""
    if not profile:
        return dict()
    result = dict(name=to_native(profile.name),
                  count=to_native(profile.capacity.default),
                  max_count=to_native(profile.capacity.maximum),
                  min_count=to_native(profile.capacity.minimum))

    if profile.rules:
        result['rules'] = [rule_to_dict(r) for r in profile.rules]
    if profile.fixed_date:
        result['fixed_date_timezone'] = profile.fixed_date.time_zone
        result['fixed_date_start'] = profile.fixed_date.start
        result['fixed_date_end'] = profile.fixed_date.end
    if profile.recurrence:
        if get_enum_value(profile.recurrence.frequency) != 'None':
            result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
        if profile.recurrence.schedule:
            result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
            result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
            result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
            result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
    return result


def notification_to_dict(notification):
    """Serialize an AutoscaleNotification; tolerate a missing email section."""
    if not notification:
        return dict()
    # BUGFIX: the email section can be absent (e.g. webhook-only notifications).
    # The original guarded the two boolean fields but dereferenced
    # email.custom_emails unconditionally, raising AttributeError in that case.
    email = notification.email
    return dict(send_to_subscription_administrator=email.send_to_subscription_administrator if email else False,
                send_to_subscription_co_administrators=email.send_to_subscription_co_administrators if email else False,
                custom_emails=[to_native(e) for e in (email.custom_emails if email else None) or []],
                webhooks=[to_native(w.service_url) for w in notification.webhooks or []])


class AzureRMAutoScaleInfo(AzureRMModuleBase):
    """Facts module: fetch one autoscale setting by name or list a resource group."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list',
                elements='str'
            )
        )
        # store the results of the module operation
        self.results = dict()
        self.resource_group = None
        self.name = None
        self.tags = None

        super(AzureRMAutoScaleInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Return {'autoscales': [...]} for the requested name or resource group."""

        # keep supporting the deprecated *_facts alias
        is_old_facts = self.module._name == 'azure_rm_autoscale_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_autoscale_facts' module has been renamed to 'azure_rm_autoscale_info'", version=(2.9, ))

        for key in list(self.module_arg_spec):
            setattr(self, key, kwargs[key])

        if self.resource_group and self.name:
            self.results['autoscales'] = self.get()
        elif self.resource_group:
            self.results['autoscales'] = self.list_by_resource_group()
        return self.results

    def get(self):
        """Return a one-item list with the named setting, or [] when absent or filtered out."""
        result = []
        try:
            instance = self.monitor_autoscale_settings_client.autoscale_settings.get(self.resource_group, self.name)
            # BUGFIX: only include the instance when it matches the tag filter;
            # the original appended None in that case, leaking [None] to callers.
            if self.has_tags(instance.tags, self.tags):
                result = [auto_scale_to_dict(instance)]
        except Exception as ex:
            self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
        return result

    def list_by_resource_group(self):
        """Return all settings in the resource group that match the tag filter."""
        results = []
        try:
            response = self.monitor_autoscale_settings_client.autoscale_settings.list_by_resource_group(self.resource_group)
            results = [auto_scale_to_dict(item) for item in response if self.has_tags(item.tags, self.tags)]
        except Exception as ex:
            self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
        return results


def main():
    AzureRMAutoScaleInfo()


if __name__ == '__main__':
    main()

# ---- next file in this patch: azure_rm_availabilityset.py ----
# (file header; the module body continues in the following chunk)
# #!/usr/bin/python
# Copyright (c) 2017 Julien Stroheker
# GNU General Public License v3.0+ (see COPYING or
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_availabilityset + +version_added: "0.1.2" + +short_description: Manage Azure Availability Set + +description: + - Create, update and delete Azure Availability Set. + - An availability set cannot be updated, you will have to recreate one instead. + - The only update operation will be for the tags. + +options: + resource_group: + description: + - Name of a resource group where the availability set exists or will be created. + required: true + name: + description: + - Name of the availability set. + required: true + state: + description: + - Assert the state of the availability set. + - Use C(present) to create or update a availability set and C(absent) to delete a availability set. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + platform_update_domain_count: + description: + - Update domains indicate groups of virtual machines and underlying physical hardware that can be rebooted at the same time. + type: int + default: 5 + platform_fault_domain_count: + description: + - Fault domains define the group of virtual machines that share a common power source and network switch. + - Should be between C(1) and C(3). + type: int + default: 3 + proximity_placement_group: + description: + - The proximity placement group that the availability set should be assigned to. + type: str + sku: + description: + - Define if the availability set supports managed disks. 
+ default: Classic + choices: + - Classic + - Aligned +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Julien Stroheker (@julienstroheker) +''' + +EXAMPLES = ''' + - name: Create an availability set with default options + azure_rm_availabilityset: + name: myAvailabilitySet + location: eastus + resource_group: myResourceGroup + + - name: Create an availability set with advanced options + azure_rm_availabilityset: + name: myAvailabilitySet + location: eastus + resource_group: myResourceGroup + platform_update_domain_count: 5 + platform_fault_domain_count: 3 + proximity_placement_group: myProximityPlacementGroup + sku: Aligned + + - name: Delete an availability set + azure_rm_availabilityset: + name: myAvailabilitySet + location: eastus + resource_group: myResourceGroup + state: absent +''' + +RETURN = ''' +state: + description: Current state of the availability set. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + type: str + sample: "/subscriptions/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/availabilitySets/myavailabilityset2" + location: + description: + - Location where the resource lives. + type: str + sample: eastus + name: + description: + - Resource name. + type: str + sample: myavailabilityset2 + platform_fault_domain_count: + description: + - Fault domains values. + type: int + sample: 2 + platform_update_domain_count: + description: + - Update domains values. + type: int + sample: 5 + proximity_placement_group: + description: + - The proximity placement group that the availability is assigned to. + type: str + sample: myProximityPlacementGroup + sku: + description: + - The availability set supports managed disks. + type: str + sample: Aligned + tags: + description: + - Resource tags. 
+ type: dict + sample: {env: sandbox} + +changed: + description: Whether or not the resource has changed + returned: always + type: bool + sample: true +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +def availability_set_to_dict(avaset): + ''' + Serializing the availability set from the API to Dict + :return: dict + ''' + return dict( + id=avaset.id, + name=avaset.name, + location=avaset.location, + platform_update_domain_count=avaset.platform_update_domain_count, + platform_fault_domain_count=avaset.platform_fault_domain_count, + proximity_placement_group=avaset.proximity_placement_group.id if avaset.proximity_placement_group else None, + tags=avaset.tags, + sku=avaset.sku.name + ) + + +class AzureRMAvailabilitySet(AzureRMModuleBase): + """Configuration class for an Azure RM availability set resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + platform_update_domain_count=dict( + type='int', + default=5 + ), + platform_fault_domain_count=dict( + type='int', + default=3 + ), + proximity_placement_group=dict( + type='str', + required=False + ), + sku=dict( + type='str', + default='Classic', + choices=['Classic', 'Aligned'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.tags = None + self.platform_update_domain_count = None + self.platform_fault_domain_count = None + self.proximity_placement_group = None + self.proximity_placement_group_resource = None + self.sku = None + self.state = None + self.warning = False + + self.results = 
dict(changed=False, state=dict()) + + super(AzureRMAvailabilitySet, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = None + response = None + to_be_updated = False + proximity_placement_group_id = None + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # Check if the AS already present in the RG + if self.state == 'present': + response = self.get_availabilityset() + self.results['state'] = response + + if self.proximity_placement_group is not None: + parsed_proximity_placement_group = parse_resource_id(self.proximity_placement_group) + proximity_placement_group = self.get_proximity_placement_group(parsed_proximity_placement_group.get('resource_group', self.resource_group), + parsed_proximity_placement_group.get('name')) + self.proximity_placement_group_resource = self.compute_models.SubResource(id=proximity_placement_group.id) + proximity_placement_group_id = proximity_placement_group.id.lower() + + if not response: + to_be_updated = True + else: + update_tags, response['tags'] = self.update_tags(response['tags']) + response_proximity_placement_group = ( + response['proximity_placement_group'].lower() if response.get('proximity_placement_group') is not None else None + ) + + if update_tags: + self.log("Tags has to be updated") + to_be_updated = True + + if response['platform_update_domain_count'] != self.platform_update_domain_count: + self.faildeploy('platform_update_domain_count') + + if response['platform_fault_domain_count'] != self.platform_fault_domain_count: + self.faildeploy('platform_fault_domain_count') + + if response_proximity_placement_group != proximity_placement_group_id: + self.faildeploy('proximity_placement_group') + + if 
response['sku'] != self.sku: + self.faildeploy('sku') + + if self.check_mode: + self.results['changed'] = to_be_updated + return self.results + + if to_be_updated: + self.results['state'] = self.create_or_update_availabilityset() + self.results['changed'] = True + + elif self.state == 'absent': + response = self.get_availabilityset() + if response: + if not self.check_mode: + self.delete_availabilityset() + self.results['changed'] = True + + return self.results + + def faildeploy(self, param): + ''' + Helper method to push fail message in the console. + Useful to notify that the users cannot change some values in a Availability Set + + :param: variable's name impacted + :return: void + ''' + self.fail("You tried to change {0} but is was unsuccessful. An Availability Set is immutable, except tags".format(str(param))) + + def create_or_update_availabilityset(self): + ''' + Method calling the Azure SDK to create or update the AS. + :return: void + ''' + self.log("Creating availabilityset {0}".format(self.name)) + try: + params_sku = self.compute_models.Sku( + name=self.sku + ) + params = self.compute_models.AvailabilitySet( + location=self.location, + tags=self.tags, + platform_update_domain_count=self.platform_update_domain_count, + platform_fault_domain_count=self.platform_fault_domain_count, + proximity_placement_group=self.proximity_placement_group_resource, + sku=params_sku + ) + response = self.compute_client.availability_sets.create_or_update(self.resource_group, self.name, params) + except Exception as e: + self.log('Error attempting to create the availability set.') + self.fail("Error creating the availability set: {0}".format(str(e))) + + return availability_set_to_dict(response) + + def delete_availabilityset(self): + ''' + Method calling the Azure SDK to delete the AS. 
+ :return: void + ''' + self.log("Deleting availabilityset {0}".format(self.name)) + try: + response = self.compute_client.availability_sets.delete(self.resource_group, self.name) + except Exception as e: + self.log('Error attempting to delete the availability set.') + self.fail("Error deleting the availability set: {0}".format(str(e))) + + return True + + def get_availabilityset(self): + ''' + Method calling the Azure SDK to get an AS. + :return: void + ''' + self.log("Checking if the availabilityset {0} is present".format(self.name)) + found = False + try: + response = self.compute_client.availability_sets.get(self.resource_group, self.name) + found = True + except ResourceNotFoundError as e: + self.log('Did not find the Availability set.') + if found is True: + return availability_set_to_dict(response) + else: + return False + + def get_proximity_placement_group(self, resource_group, name): + try: + return self.compute_client.proximity_placement_groups.get(resource_group, name) + except ResourceNotFoundError as exc: + self.fail("Error fetching proximity placement group {0} - {1}".format(name, str(exc))) + + +def main(): + """Main execution""" + AzureRMAvailabilitySet() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_availabilityset_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_availabilityset_info.py new file mode 100644 index 000000000..16122f67a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_availabilityset_info.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Julien Stroheker +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_availabilityset_info + +version_added: "0.1.2" + +short_description: Get Azure 
Availability Set facts + +description: + - Get facts for a specific availability set or all availability sets. + +options: + name: + description: + - Limit results to a specific availability set. + type: str + resource_group: + description: + - The resource group to search for the desired availability set. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Julien Stroheker (@julienstroheker) +''' + +EXAMPLES = ''' + - name: Get facts for one availability set + azure_rm_availabilityset_info: + name: Testing + resource_group: myResourceGroup + + - name: Get facts for all availability sets in a specific resource group + azure_rm_availabilityset_info: + resource_group: myResourceGroup + tags: + - key + - key:value + +''' + +RETURN = ''' +azure_availabilityset: + description: List of availability sets dicts. + returned: always + type: complex + contains: + location: + description: + - Location where the resource lives. + type: str + sample: eastus2 + name: + description: + - Resource name. + type: str + sample: myAvailabilitySet + properties: + description: + - The properties of the resource. + type: dict + contains: + platformFaultDomainCount: + description: + - Fault Domain count. + type: int + sample: 3 + platformUpdateDomainCount: + description: + - Update Domain count. + type: int + sample: 2 + virtualMachines: + description: + - A list of references to all virtualmachines in the availability set. + type: list + sample: [] + sku: + description: + - Location where the resource lives. + type: str + sample: Aligned + type: + description: + - Resource type. + type: str + sample: "Microsoft.Compute/availabilitySets" + tags: + description: + - Resource tags. 
+ type: dict + sample: { env: sandbox } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'AvailabilitySet' + + +class AzureRMAvailabilitySetInfo(AzureRMModuleBase): + """Utility class to get availability set facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + ansible_info=dict( + azure_availabilitysets=[] + ) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMAvailabilitySetInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_availabilityset_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + if self.name: + self.results['ansible_info']['azure_availabilitysets'] = self.get_item() + else: + self.results['ansible_info']['azure_availabilitysets'] = self.list_items() + + return self.results + + def get_item(self): + """Get a single availability set""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.compute_client.availability_sets.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + avase = self.serialize_obj(item, AZURE_OBJECT_CLASS) + avase['name'] = item.name 
+ avase['type'] = item.type + avase['sku'] = item.sku.name + result = [avase] + + return result + + def list_items(self): + """Get all availability sets""" + + self.log('List all availability sets') + + try: + response = self.compute_client.availability_sets.list(self.resource_group) + except ResourceNotFoundError as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + avase = self.serialize_obj(item, AZURE_OBJECT_CLASS) + avase['name'] = item.name + avase['type'] = item.type + avase['sku'] = item.sku.name + results.append(avase) + + return results + + +def main(): + """Main module execution code path""" + + AzureRMAvailabilitySetInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_azurefirewall.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_azurefirewall.py new file mode 100644 index 000000000..099fb57fc --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_azurefirewall.py @@ -0,0 +1,721 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino), Jurijs Fadejevs (@needgithubid) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_azurefirewall +version_added: '0.1.2' +short_description: Manage Azure Firewall instance +description: + - Create, update and delete instance of Azure Firewall. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + name: + description: + - The name of the Azure Firewall. + required: true + type: str + location: + description: + - Resource location. 
+ type: str + application_rule_collections: + description: + - Collection of application rule collections used by Azure Firewall. + type: list + suboptions: + priority: + description: + - Priority of the application rule collection resource. + type: int + action: + description: + - The action type of a rule collection. + choices: + - allow + - deny + type: str + rules: + description: + - Collection of rules used by a application rule collection. + type: list + suboptions: + name: + description: + - Name of the application rule. + type: str + description: + description: + - Description of the rule. + type: str + source_addresses: + description: + - List of source IP addresses for this rule. + type: list + protocols: + description: + - Array of ApplicationRuleProtocols. + type: list + target_fqdns: + description: + - List of FQDNs for this rule. + type: list + fqdn_tags: + description: + - List of FQDN Tags for this rule. + type: list + name: + description: + - Gets name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + nat_rule_collections: + description: + - Collection of NAT rule collections used by Azure Firewall. + type: list + suboptions: + priority: + description: + - Priority of the NAT rule collection resource. + type: int + action: + description: + - The action type of a NAT rule collection + choices: + - snat + - dnat + type: str + rules: + description: + - Collection of rules used by a NAT rule collection. + type: list + suboptions: + name: + description: + - Name of the NAT rule. + type: str + description: + description: + - Description of the rule. + type: str + source_addresses: + description: + - List of source IP addresses for this rule. + type: list + destination_addresses: + description: + - List of destination IP addresses for this rule. + type: list + destination_ports: + description: + - List of destination ports. 
+ type: list + protocols: + description: + - Array of AzureFirewallNetworkRuleProtocols applicable to this NAT rule. + type: list + translated_address: + description: + - The translated address for this NAT rule. + type: str + translated_port: + description: + - The translated port for this NAT rule. + type: str + name: + description: + - Gets name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + network_rule_collections: + description: + - Collection of network rule collections used by Azure Firewall. + type: list + suboptions: + priority: + description: + - Priority of the network rule collection resource. + type: int + action: + description: + - The action type of a rule collection. + type: str + choices: + - allow + - deny + rules: + description: + - Collection of rules used by a network rule collection. + type: list + suboptions: + name: + description: + - Name of the network rule. + type: str + description: + description: + - Description of the rule. + type: str + protocols: + description: + - Array of AzureFirewallNetworkRuleProtocols. + type: list + source_addresses: + description: + - List of source IP addresses for this rule. + type: list + destination_addresses: + description: + - List of destination IP addresses. + type: list + destination_ports: + description: + - List of destination ports. + type: list + name: + description: + - Gets name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + ip_configurations: + description: + - IP configuration of the Azure Firewall resource. + type: list + suboptions: + subnet: + description: + - Existing subnet. + - It can be a string containing subnet resource ID. + - It can be a dictionary containing I(name), I(virtual_network_name) and optionally I(resource_group) . + type: raw + public_ip_address: + description: + - Existing public IP address. 
+ - It can be a string containing resource ID. + - It can be a string containing a name in current resource group. + - It can be a dictionary containing I(name) and optionally I(resource_group). + type: raw + name: + description: + - Name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + state: + description: + - Assert the state of the AzureFirewall. + - Use C(present) to create or update an AzureFirewall and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Zim Kalinowski (@zikalino) + - Jurijs Fadejevs (@needgithubid) + +''' + +EXAMPLES = ''' +- name: Create Azure Firewall + azure_rm_azurefirewall: + resource_group: myResourceGroup + name: myAzureFirewall + tags: + key1: value1 + application_rule_collections: + - priority: 110 + action: deny + rules: + - name: rule1 + description: Deny inbound rule + source_addresses: + - 216.58.216.164 + - 10.0.0.0/24 + protocols: + - type: https + port: '443' + target_fqdns: + - www.test.com + name: apprulecoll + nat_rule_collections: + - priority: 112 + action: dnat + rules: + - name: DNAT-HTTPS-traffic + description: D-NAT all outbound web traffic for inspection + source_addresses: + - '*' + destination_addresses: + - 1.2.3.4 + destination_ports: + - '443' + protocols: + - tcp + translated_address: 1.2.3.5 + translated_port: '8443' + name: natrulecoll + network_rule_collections: + - priority: 112 + action: deny + rules: + - name: L4-traffic + description: Block traffic based on source IPs and ports + protocols: + - tcp + source_addresses: + - 192.168.1.1-192.168.1.12 + - 10.1.4.12-10.1.4.255 + destination_addresses: + - '*' + destination_ports: + - 443-444 + - '8443' + name: netrulecoll + ip_configurations: + - subnet: >- + /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup + 
class Actions:
    NoAction, Create, Update, Delete = range(4)


class AzureRMAzureFirewalls(AzureRMModuleBaseExt):
    """Create, update or delete an Azure Firewall through the generic ARM REST client.

    The ``disposition``/``pattern`` metadata in the argument spec drives
    ``inflate_parameters()`` in AzureRMModuleBaseExt, which maps the flat module
    options onto the nested JSON body expected by the azureFirewalls REST API.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                disposition='resource_group_name',
                required=True
            ),
            name=dict(
                type='str',
                disposition='azure_firewall_name',
                required=True
            ),
            location=dict(
                type='str',
                updatable=False,
                disposition='/',
                comparison='location'
            ),
            application_rule_collections=dict(
                type='list',
                disposition='/properties/applicationRuleCollections',
                options=dict(
                    priority=dict(
                        type='int',
                        disposition='properties/*'
                    ),
                    action=dict(
                        type='str',
                        choices=['allow',
                                 'deny'],
                        disposition='properties/action/type',
                        pattern='camelize'
                    ),
                    rules=dict(
                        type='list',
                        disposition='properties/*',
                        options=dict(
                            name=dict(
                                type='str'
                            ),
                            description=dict(
                                type='str'
                            ),
                            source_addresses=dict(
                                type='list',
                                disposition='sourceAddresses'
                            ),
                            protocols=dict(
                                type='list',
                                options=dict(
                                    type=dict(
                                        type='str',
                                        disposition='protocolType'
                                    ),
                                    port=dict(
                                        type='str'
                                    )
                                )
                            ),
                            target_fqdns=dict(
                                type='list',
                                disposition='targetFqdns'
                            ),
                            fqdn_tags=dict(
                                type='list',
                                disposition='fqdnTags'
                            )
                        )
                    ),
                    name=dict(
                        type='str'
                    )
                )
            ),
            nat_rule_collections=dict(
                type='list',
                disposition='/properties/natRuleCollections',
                options=dict(
                    priority=dict(
                        type='int',
                        disposition='properties/*'
                    ),
                    action=dict(
                        type='str',
                        disposition='properties/action/type',
                        choices=['snat',
                                 'dnat'],
                        pattern='camelize'
                    ),
                    rules=dict(
                        type='list',
                        disposition='properties/*',
                        options=dict(
                            name=dict(
                                type='str'
                            ),
                            description=dict(
                                type='str'
                            ),
                            source_addresses=dict(
                                type='list',
                                disposition='sourceAddresses'
                            ),
                            destination_addresses=dict(
                                type='list',
                                disposition='destinationAddresses'
                            ),
                            destination_ports=dict(
                                type='list',
                                disposition='destinationPorts'
                            ),
                            protocols=dict(
                                type='list'
                            ),
                            translated_address=dict(
                                type='str',
                                disposition='translatedAddress'
                            ),
                            translated_port=dict(
                                type='str',
                                disposition='translatedPort'
                            )
                        )
                    ),
                    name=dict(
                        type='str'
                    )
                )
            ),
            network_rule_collections=dict(
                type='list',
                disposition='/properties/networkRuleCollections',
                options=dict(
                    priority=dict(
                        type='int',
                        disposition='properties/*'
                    ),
                    action=dict(
                        type='str',
                        choices=['allow',
                                 'deny'],
                        disposition='properties/action/type',
                        pattern='camelize'
                    ),
                    rules=dict(
                        type='list',
                        disposition='properties/*',
                        options=dict(
                            name=dict(
                                type='str'
                            ),
                            description=dict(
                                type='str'
                            ),
                            protocols=dict(
                                type='list'
                            ),
                            source_addresses=dict(
                                type='list',
                                disposition='sourceAddresses'
                            ),
                            destination_addresses=dict(
                                type='list',
                                disposition='destinationAddresses'
                            ),
                            destination_ports=dict(
                                type='list',
                                disposition='destinationPorts'
                            )
                        )
                    ),
                    name=dict(
                        type='str'
                    )
                )
            ),
            ip_configurations=dict(
                type='list',
                disposition='/properties/ipConfigurations',
                options=dict(
                    subnet=dict(
                        type='raw',
                        disposition='properties/subnet/id',
                        pattern=('/subscriptions/{subscription_id}/resourceGroups'
                                 '/{resource_group}/providers/Microsoft.Network'
                                 '/virtualNetworks/{virtual_network_name}/subnets'
                                 '/{name}')
                    ),
                    public_ip_address=dict(
                        type='raw',
                        disposition='properties/publicIPAddress/id',
                        pattern=('/subscriptions/{subscription_id}/resourceGroups'
                                 '/{resource_group}/providers/Microsoft.Network'
                                 '/publicIPAddresses/{name}')
                    ),
                    name=dict(
                        type='str'
                    )
                )
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.name = None
        self.body = {}

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200, 201, 202]
        self.to_do = Actions.NoAction

        self.query_parameters = {}
        self.query_parameters['api-version'] = '2018-11-01'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureRMAzureFirewalls, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested state with Azure and return the result dict."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        # Expand flat options into the nested REST body per the dispositions above.
        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        resource_group = self.get_resource_group(self.resource_group)

        if 'location' not in self.body:
            # Default to the resource group's location when none was supplied.
            self.body['location'] = resource_group.location

        self.url = ('/subscriptions' +
                    '/' + self.subscription_id +
                    '/resourceGroups' +
                    '/' + self.resource_group +
                    '/providers' +
                    '/Microsoft.Network' +
                    '/azureFirewalls' +
                    '/' + self.name)

        old_response = self.get_resource()

        if not old_response:
            self.log("AzureFirewall instance doesn't exist")

            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log('AzureFirewall instance already exists')

            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                modifiers = {}
                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
                self.results['modifiers'] = modifiers
                self.results['compare'] = []
                if not self.default_compare(modifiers, self.body, old_response, '', self.results):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log('Need to Create / Update the AzureFirewall instance')

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_resource()
            self.results['changed'] = True
            self.log('Creation / Update done')
        elif self.to_do == Actions.Delete:
            self.log('AzureFirewall instance deleted')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_resource()

            # Make sure the instance is actually deleted; for some Azure resources
            # the instance hangs around for a while after deletion.
            while self.get_resource():
                time.sleep(20)
        else:
            self.log('AzureFirewall instance unchanged')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            # Wait for an in-flight update to settle before returning.
            # Bug fix: get_resource() returns False once the resource is gone and
            # a partial payload may lack 'properties'/'provisioningState'; the
            # original chained subscripting raised TypeError/KeyError here.
            while response and response.get('properties', {}).get('provisioningState') == 'Updating':
                time.sleep(30)
                response = self.get_resource()

        return self.results

    def create_update_resource(self):
        """PUT the inflated body; return the decoded JSON response (or raw text)."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'PUT',
                                              self.query_parameters,
                                              self.header_parameters,
                                              self.body,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as exc:
            self.log('Error attempting to create the AzureFirewall instance.')
            self.fail('Error creating the AzureFirewall instance: {0}'.format(str(exc)))

        try:
            response = json.loads(response.text)
        except Exception:
            # Non-JSON payloads (e.g. empty 202 bodies) are passed through verbatim.
            response = {'text': response.text}

        return response

    def delete_resource(self):
        """DELETE the firewall; fail the module on a REST error."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'DELETE',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as e:
            self.log('Error attempting to delete the AzureFirewall instance.')
            self.fail('Error deleting the AzureFirewall instance: {0}'.format(str(e)))

        return True

    def get_resource(self):
        """Return the firewall's current JSON state, or False when it does not exist."""
        found = False
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            response = json.loads(response.text)
            found = True
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Did not find the AzureFirewall instance.')

        return response if found else False


def main():
    AzureRMAzureFirewalls()


if __name__ == '__main__':
    main()
/dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_azurefirewall_info.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Liu Qingyi, (@smile37773) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_azurefirewall_info +version_added: '0.1.2' +short_description: Get AzureFirewall info +description: + - Get info of AzureFirewall. +options: + resource_group: + description: + - The name of the resource group. + type: str + name: + description: + - Resource name. + type: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Liu Qingyi (@smile37773) + +''' + +EXAMPLES = ''' +- name: List all Azure Firewalls for a given subscription + azure_rm_azurefirewall_info: +- name: List all Azure Firewalls for a given resource group + azure_rm_azurefirewall_info: + resource_group: myResourceGroup +- name: Get Azure Firewall + azure_rm_azurefirewall_info: + resource_group: myResourceGroup + name: myAzureFirewall + +''' + +RETURN = ''' +firewalls: + description: + - A list of dict results where the key is the name of the AzureFirewall and the values are the facts for that AzureFirewall. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ + myResourceGroup/providers/Microsoft.Network/azureFirewalls/myAzureFirewall" + name: + description: + - Resource name. + returned: always + type: str + sample: "myAzureFirewall" + location: + description: + - Resource location. + returned: always + type: str + sample: "eastus" + tags: + description: + - Resource tags. 
class AzureRMAzureFirewallsInfo(AzureRMModuleBase):
    """Gather facts about Azure Firewalls: one firewall, one resource group, or the whole subscription."""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            )
        )

        self.resource_group = None
        self.name = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200]

        self.query_parameters = {}
        self.query_parameters['api-version'] = '2018-11-01'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureRMAzureFirewallsInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to get() / list() / listall() depending on which options were supplied."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if (self.resource_group is not None and self.name is not None):
            self.results['firewalls'] = self.get()
        elif (self.resource_group is not None):
            self.results['firewalls'] = self.list()
        else:
            self.results['firewalls'] = self.listall()
        return self.results

    def get(self):
        """Return facts for a single firewall, or an empty dict when the GET fails."""
        results = {}
        self.url = ('/subscriptions' +
                    '/' + self.subscription_id +
                    '/resourceGroups' +
                    '/' + self.resource_group +
                    '/providers' +
                    '/Microsoft.Network' +
                    '/azureFirewalls' +
                    '/' + self.name)

        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError as e:
            # Bug fix: the original logged a leftover code-generation template
            # placeholder ('@(Model.ModuleOperationNameUpper)') here.
            self.log('Could not get info for AzureFirewall.')

        # Bug fix: the original called format_item({}) after a failed request,
        # which raised KeyError('id'); return an empty result instead.
        return self.format_item(results) if results else {}

    def list(self):
        """Return facts for every firewall in the resource group."""
        results = {}
        self.url = ('/subscriptions' +
                    '/' + self.subscription_id +
                    '/resourceGroups' +
                    '/' + self.resource_group +
                    '/providers' +
                    '/Microsoft.Network' +
                    '/azureFirewalls')

        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError as e:
            self.log('Could not list AzureFirewalls in the resource group.')

        # Bug fix: results['value'] raised KeyError when the request failed
        # (results stays {}); .get() keeps the empty-list fallback working.
        return [self.format_item(item) for item in results.get('value') or []]

    def listall(self):
        """Return facts for every firewall in the subscription."""
        results = {}
        self.url = ('/subscriptions' +
                    '/' + self.subscription_id +
                    '/providers' +
                    '/Microsoft.Network' +
                    '/azureFirewalls')

        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError as e:
            self.log('Could not list AzureFirewalls in the subscription.')

        # Same KeyError('value') guard as list().
        return [self.format_item(item) for item in results.get('value') or []]

    def format_item(self, item):
        """Flatten a raw azureFirewalls REST payload into the documented fact layout."""
        properties = item.get('properties') or {}
        return {
            'id': item.get('id'),
            'name': item.get('name'),
            'location': item.get('location'),
            'etag': item.get('etag'),
            'tags': item.get('tags'),
            'nat_rule_collections': properties.get('natRuleCollections'),
            'network_rule_collections': properties.get('networkRuleCollections'),
            'ip_configurations': properties.get('ipConfigurations'),
            'provisioning_state': properties.get('provisioningState')
        }


def main():
    AzureRMAzureFirewallsInfo()


if __name__ == '__main__':
    main()
+short_description: Back up an Azure Virtual Machine using Azure Backup +description: + - Back up an Azure VM using Azure Backup. + - Enabling/Updating protection for the Azure VM. + - Trigger an on-demand backup for a protected Azure VM. + - Stop protection but retain existing data. + - Stop protection and delete data. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + recovery_vault_name: + description: + - The name of the Azure Recovery Service Vault. + required: true + type: str + resource_id: + description: + - Azure Virtual Machine Resource ID. + required: true + type: str + backup_policy_id: + description: + - Backup Policy ID present under Recovery Service Vault mentioned in recovery_vault_name field. + required: true + type: str + recovery_point_expiry_time: + description: + - Recovery Point Expiry Time in UTC. + - This used if C(state) parameter is C(backup). + required: false + type: str + version_added: '1.15.0' + state: + description: + - Assert the state of the protection item. + - Use C(create) for enabling protection for the Azure VM. + - Use C(update) for changing the policy of protection. + - Use C(stop) for stop protection but retain existing data. + - Use C(delete) for stop protection and delete data. + - Use C(backup) for on-demand backup. 
+ default: create + type: str + choices: + - create + - update + - delete + - stop + - backup +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Suyeb Ansari (@suyeb786) + - Pallavi Chaudhari (@PallaviC2510) + +''' + +EXAMPLES = \ + ''' + - name: Enabling/Updating protection for the Azure VM + azure_rm_backupazurevm: + resource_group: 'myResourceGroup' + recovery_vault_name: 'testVault' + resource_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testVM' + backup_policy_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/microsoft.recoveryservices/vaults/testVault/backupPolicies/ProdPolicy' + state: 'create' + - name: Stop protection but retain existing data + azure_rm_backupazurevm: + resource_group: 'myResourceGroup' + recovery_vault_name: 'testVault' + resource_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testVM' + state: 'stop' + - name: Stop protection and delete data + azure_rm_backupazurevm: + resource_group: 'myResourceGroup' + recovery_vault_name: 'testVault' + resource_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testVM' + state: 'delete' + - name: Trigger an on-demand backup for a protected Azure VM + azure_rm_backupazurevm: + resource_group: 'myResourceGroup' + recovery_vault_name: 'testVault' + resource_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testVM' + backup_policy_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/microsoft.recoveryservices/vaults/testVault/backupPolicies/ProdPolicy' + recovery_point_expiry_time: 
class Actions:
    (NoAction, Create, Update, Delete) = range(4)


class BackupAzureVM(AzureRMModuleBaseExt):
    """Manage Azure Backup protection for a single Azure VM via the ARM REST API.

    state=create/update enables protection or changes the policy, stop retains
    data while pausing protection, delete removes protection and data, and
    backup triggers an on-demand backup of an already-protected VM.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            recovery_vault_name=dict(
                type='str',
                required=True
            ),
            resource_id=dict(
                type='str',
                required=True
            ),
            backup_policy_id=dict(
                type='str',
                required=True
            ),
            recovery_point_expiry_time=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='create',
                choices=['create', 'update', 'delete', 'stop', 'backup']
            )
        )

        self.resource_group = None
        self.recovery_vault_name = None
        self.resource_id = None
        self.backup_policy_id = None
        self.recovery_point_expiry_time = None
        self.state = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.url = None
        self.status_code = [200, 201, 202, 204]
        self.to_do = Actions.NoAction

        self.body = {}
        self.query_parameters = {}
        # Filled in by exec_module(): the version depends on the requested state.
        self.query_parameters['api-version'] = None
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(BackupAzureVM, self).__init__(derived_arg_spec=self.module_arg_spec,
                                            supports_check_mode=True,
                                            supports_tags=True)

    def get_api_version(self):
        """Protected-item operations use a newer API version than on-demand backup."""
        return '2019-05-13' if self.state in ('create', 'update', 'delete', 'stop') else '2016-12-01'

    def _protected_item_url(self):
        """Build the protected-item URL shared by every operation.

        The original duplicated this ten-line expression in both get_url branches.
        """
        vm = self.parse_resource_to_dict(self.resource_id)
        return '/subscriptions' + '/' + self.subscription_id \
            + '/resourceGroups' + '/' + self.resource_group + '/providers' \
            + '/Microsoft.RecoveryServices' + '/vaults' + '/' \
            + self.recovery_vault_name \
            + '/backupFabrics/Azure/protectionContainers/' \
            + 'iaasvmcontainer;iaasvmcontainerv2;' + vm['resource_group'] \
            + ';' + vm['name'] + '/protectedItems/' \
            + 'vm;iaasvmcontainerv2;' + vm['resource_group'] + ';' \
            + vm['name']

    def get_url(self):
        """Return the request URL for the current state ('/backup' suffix for on-demand backup)."""
        if self.state == 'backup':
            return self._protected_item_url() + '/backup'
        return self._protected_item_url()

    def get_body(self):
        """Return the request body appropriate for the current state."""
        if self.state in ('create', 'update'):
            return {
                'properties':
                {
                    'protectedItemType': 'Microsoft.Compute/virtualMachines',
                    'sourceResourceId': self.resource_id,
                    'policyId': self.backup_policy_id
                }
            }
        if self.state == 'backup':
            body = {
                "properties": {
                    "objectType": "IaasVMBackupRequest"
                }
            }
            if self.recovery_point_expiry_time:
                body["properties"]["recoveryPointExpiryTimeInUTC"] = self.recovery_point_expiry_time

            return body
        if self.state == 'stop':
            return {
                "properties": {
                    "protectedItemType": "Microsoft.Compute/virtualMachines",
                    "sourceResourceId": self.resource_id,
                    "protectionState": "ProtectionStopped"
                }
            }
        # state == 'delete' sends no body.
        return {}

    def exec_module(self, **kwargs):
        """Run the requested backup operation and return the module results."""
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        self.query_parameters['api-version'] = self.get_api_version()
        self.url = self.get_url()
        self.body = self.get_body()
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        # The states are mutually exclusive, so exactly one branch runs; every
        # state issues a request and therefore reports a change.
        changed = True
        if self.state in ('create', 'update'):
            response = self.enable_update_protection_for_azure_vm()
        elif self.state == 'delete':
            response = self.stop_protection_and_delete_data()
        elif self.state == 'stop':
            response = self.stop_protection_but_retain_existing_data()
        elif self.state == 'backup':
            response = self.trigger_on_demand_backup()
        self.results['response'] = response
        self.results['changed'] = changed

        return self.results

    def _request(self, method, body, log_msg, fail_msg):
        """Issue one REST call; fail the module on error, else return decoded JSON.

        The original repeated this try/query/decode pattern in all four
        operation methods below.
        """
        try:
            response = self.mgmt_client.query(
                self.url,
                method,
                self.query_parameters,
                self.header_parameters,
                body,
                self.status_code,
                600,
                30,
            )
        except Exception as e:
            self.log(log_msg)
            self.fail(fail_msg.format(str(e)))

        try:
            return json.loads(response.text)
        except Exception:
            # Non-JSON payloads (e.g. empty 202/204 bodies) are returned verbatim.
            return {'text': response.text}

    def enable_update_protection_for_azure_vm(self):
        """Enable protection for the VM, or switch it to a different policy (PUT)."""
        return self._request('PUT', self.body,
                             'Error in enabling/updating protection for Azure VM.',
                             'Error in creating/updating protection for Azure VM {0}')

    def stop_protection_but_retain_existing_data(self):
        """Pause protection while keeping the already-taken recovery points (PUT)."""
        return self._request('PUT', self.body,
                             'Error attempting to stop protection.',
                             'Error in disabling the protection: {0}')

    def stop_protection_and_delete_data(self):
        """Remove protection and delete all backup data for the VM (DELETE)."""
        return self._request('DELETE', None,
                             'Error attempting to delete backup.',
                             'Error deleting the azure backup: {0}')

    def trigger_on_demand_backup(self):
        """Trigger an on-demand backup of a protected VM (POST)."""
        return self._request('POST', self.body,
                             'Error attempting to backup azure vm.',
                             'Error while taking on-demand backup: {0}')


def main():
    BackupAzureVM()


if __name__ == '__main__':
    main()
Chaudhari(@PallaviC2510) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = \ + ''' +--- +module: azure_rm_backupazurevm_info +version_added: '1.1.0' +short_description: Back up an Azure Virtual Machine using Azure Backup Information +description: + - Get Recovery point details for protected items. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + recovery_vault_name: + description: + - The name of the Azure Recovery Service Vault. + required: true + type: str + resource_id: + description: + - Azure Virtual Machine Resource ID. + required: true + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Suyeb Ansari (@suyeb786) + - Pallavi Chaudhari (@PallaviC2510) + +''' + +EXAMPLES = \ + ''' + - name: Get Recovery Point Details + azure_rm_backupazurevm_info: + resource_group: 'myResourceGroup' + recovery_vault_name: 'testVault' + resource_id: '/subscriptions/00000000-0000-0000-0000-000000000000/ \ + resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testVM' + ''' + +RETURN = \ + ''' +id: + description: + - VM Protection details. 
+ returned: always + type: str + sample: '{"response":{"id":"protection_id","name":"protection_item_name","properties":{}}}' +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +import re +import json +import time + + +class Actions: + (NoAction, Create, Update, Delete) = range(4) + + +class BackupAzureVMInfo(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + recovery_vault_name=dict( + type='str', + required=True + ), + resource_id=dict( + type='str', + required=True + ) + ) + + self.resource_group = None + self.recovery_vault_name = None + self.resource_id = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.url = None + self.status_code = [200, 201, 202, 204] + self.to_do = Actions.NoAction + + self.body = {} + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-05-13' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(BackupAzureVMInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def get_url(self): + return '/subscriptions' + '/' + self.subscription_id \ + + '/resourceGroups' + '/' + self.resource_group + '/providers' \ + + '/Microsoft.RecoveryServices' + '/vaults' + '/' \ + + self.recovery_vault_name \ + + '/backupFabrics/Azure/protectionContainers/' \ + + 'iaasvmcontainer;iaasvmcontainerv2;' + self.parse_resource_to_dict(self.resource_id)['resource_group'] \ + + ';' + self.parse_resource_to_dict(self.resource_id)['name'] + '/protectedItems/' \ + + 'vm;iaasvmcontainerv2;' + self.parse_resource_to_dict(self.resource_id)['resource_group'] + ';' \ + + self.parse_resource_to_dict(self.resource_id)['name'] + + def 
exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + self.url = self.get_url() + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + response = self.get_recovery_point_info() + self.results['response'] = response + return self.results + + def get_recovery_point_info(self): + + # self.log('Fetching protection details for the Azure Virtual Machine {0}'.format(self.)) + + try: + response = self.mgmt_client.query( + self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30, + ) + except Exception as e: + self.log('Error in fetching recovery point.') + self.fail('Error in fetching recovery point {0}'.format(str(e))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + +def main(): + BackupAzureVMInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy.py new file mode 100644 index 000000000..7ec699566 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy.py @@ -0,0 +1,459 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Cole Neubauer, (@coleneubauer) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_backuppolicy +version_added: "1.4.0" 
+short_description: Manage Azure Backup Policy +description: + - Create and delete instance of Azure Backup Policy. + +options: + vault_name: + description: + - The name of the Recovery Services Vault the policy belongs to. + required: true + type: str + name: + description: + - The name of the backup policy. + required: true + type: str + resource_group: + description: + - The name of the resource group the vault is in. + required: true + type: str + state: + description: + - Assert the state of the backup policy. + - Use C(present) to create or update a backup policy and C(absent) to delete it. + default: present + choices: + - absent + - present + type: str + backup_management_type: + description: + - Defines the type of resource the policy will be applied to. + choices: + - AzureIaasVM + type: str + schedule_run_time: + description: + - The hour to run backups. + - Valid choices are on 24 hour scale (0-23). + type: int + instant_recovery_snapshot_retention: + description: + - How many days to retain instant recovery snapshots. + type: int + schedule_run_frequency: + description: + - The frequency to run the policy. + choices: + - Daily + - Weekly + type: str + schedule_days: + description: + - List of days to execute the schedule. + - Does not apply to Daily frequency. + type: list + elements: str + weekly_retention_count: + description: + - The amount of weeks to retain backups. + type: int + daily_retention_count: + description: + - The amount of days to retain backups. + - Does not apply to Weekly frequency. + type: int + schedule_weekly_frequency: + description: + - The amount of weeks between backups. + - Backup every I(schedule_weekly_frequency) week(s). + - Azure will default behavior to running weekly if this is left blank. + - Does not apply to Daily frequency. + type: int + time_zone: + description: + - Timezone to apply I(schedule_run_time). 
+ default: UTC + type: str +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Cole Neubauer(@coleneubauer) +''' + +EXAMPLES = ''' + - name: Delete a backup policy + azure_rm_backuppolicy: + vault_name: Vault_Name + name: Policy_Name + resource_group: Resource_Group_Name + state: absent + + - name: Create a daily VM backup policy + azure_rm_backuppolicy: + vault_name: Vault_Name + name: Policy_Name + resource_group: Resource_Group_Name + state: present + backup_management_type: "AzureIaasVM" + schedule_run_frequency: "Daily" + instant_recovery_snapshot_retention: 2 + daily_retention_count: 12 + time_zone: "Pacific Standard Time" + schedule_run_time: 14 + + - name: Create a weekly VM backup policy + azure.azcollection.azure_rm_backuppolicy: + vault_name: Vault_Name + name: Policy_Name + resource_group: Resource_Group_Name + state: present + backup_management_type: "AzureIaasVM" + schedule_run_frequency: "Weekly" + instant_recovery_snapshot_retention: 5 + weekly_retention_count: 4 + schedule_days: + - "Monday" + - "Wednesday" + - "Friday" + time_zone: "Pacific Standard Time" + schedule_run_time: 8 + +''' + +RETURN = ''' +id: + description: + - Id of specified backup policy. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.RecoveryServices/vaults/Vault_Name/backupPolicies/Policy_Name" +location: + description: + - Location of backup policy. + type: str + returned: always + sample: eastus +name: + description: + - Name of backup policy. + type: str + returned: always + sample: DefaultPolicy +type: + description: + - Type of backup policy. 
+ type: str + returned: always + sample: Microsoft.RecoveryServices/vaults/backupPolicies +''' + +import uuid +from datetime import datetime +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMBackupPolicy(AzureRMModuleBase): + """Configuration class for an Azure RM Backup Policy""" + + def __init__(self): + self.module_arg_spec = dict( + vault_name=dict(type='str', required=True), + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + backup_management_type=dict(type='str', choices=['AzureIaasVM']), + schedule_run_time=dict(type='int'), + instant_recovery_snapshot_retention=dict(type='int'), + schedule_run_frequency=dict(type='str', choices=['Daily', 'Weekly']), + schedule_days=dict(type='list', elements='str'), + weekly_retention_count=dict(type='int'), + daily_retention_count=dict(type='int'), + schedule_weekly_frequency=dict(type='int'), + time_zone=dict(type='str', default='UTC'), + ) + + self.vault_name = None + self.name = None + self.resource_group = None + self.backup_management_type = None + self.schedule_run_time = None + self.instant_recovery_snapshot_retention = None + self.schedule_run_frequency = None + self.schedule_days = None + self.weekly_retention_count = None + self.schedule_weekly_frequency = None + self.daily_retention_count = None + self.time_zone = None + + self.results = dict( + changed=False, + id=None, + ) + + required_if = [('schedule_run_frequency', 'Weekly', ['schedule_days', 'weekly_retention_count', 'schedule_run_time']), + ('schedule_run_frequency', 'Daily', ['daily_retention_count', 'schedule_run_time']), + ('state', 'present', ['schedule_run_frequency', 'backup_management_type']), + ('log_mode', 'file', 
['log_path'])] + + super(AzureRMBackupPolicy, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + existing_backup_policy = None + response = None + + existing_backup_policy = self.get_backup_policy() + + if existing_backup_policy: + self.set_results(existing_backup_policy) + + # either create or update + if self.state == 'present': + # check if the backup policy exists + if not existing_backup_policy: + self.log("Backup policy {0} for vault {1} in resource group {2} does not exist.".format(self.name, + self.vault_name, + self.resource_group)) + + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_or_update_backup_policy() + self.set_results(response) + + # log that we're doing an update + else: + self.log("Backup policy {0} for vault {1} in resource group {2} already exists, updating".format(self.name, + self.vault_name, + self.resource_group)) + + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_or_update_backup_policy() + self.set_results(response) + + elif self.state == 'absent': + if existing_backup_policy: + self.log("Delete backup policy") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_backup_policy() + + self.log('backup policy deleted') + + else: + # If backup policy doesn't exist, that's the desired state. + self.log("Backup policy {0} for vault {1} in resource group {2} does not exist.".format(self.name, + self.vault_name, + self.resource_group)) + + return self.results + + def create_or_update_backup_policy(self): + ''' + Creates or updates backup policy. 
+ + :return: ProtectionPolicyResource + ''' + self.log("Creating backup policy {0} for vault {1} in resource group {2}".format(self.name, + self.vault_name, + self.resource_group)) + self.log("Creating backup policy in progress") + + response = None + + try: + instant_rp_details = None + # need to represent the run time as a date_time + # year, month, day has no impact on run time but is more consistent to see it as the time of creation rather than hardcoded value + dt = datetime.utcnow() + dt = datetime(dt.year, dt.month, dt.day, 0, 0) + + # azure requires this as a list but at this time doesn't support multiple run times + # should easily be converted at this step if they support it in the future + schedule_run_times_as_datetimes = [] + schedule_run_time = self.schedule_run_time + + # basic parameter checking. try to provide a better description of faults than azure does at this time + try: + if 0 <= schedule_run_time <= 23: + schedule_run_times_as_datetimes = [(dt.replace(hour=schedule_run_time))] + else: + raise ValueError('Paramater schedule_run_time {0} is badly formed must be on the 24 hour scale'.format(schedule_run_time)) + # azure forces instant_recovery_snapshot_retention to be 5 when schedule type is Weekly + if self.schedule_run_frequency == "Weekly" and self.instant_recovery_snapshot_retention != 5: + raise ValueError('Paramater instant_recovery_snapshot_retention was {0} but must be 5 when schedule_run_frequency is Weekly' + .format(self.instant_recovery_snapshot_retention)) + + if self.schedule_run_frequency == "Weekly" and not (1 <= self.weekly_retention_count <= 5163): + raise ValueError('Paramater weekly_retention_count was {0} but must be between 1 and 5163 when schedule_run_frequency is Weekly' + .format(self.weekly_retention_count)) + + if self.schedule_run_frequency == "Daily" and not (7 <= self.daily_retention_count <= 9999): + raise ValueError('Paramater daily_retention_count was {0} but must be between 7 and 9999 when 
schedule_run_frequency is Daily' + .format(self.daily_retention_count)) + + except ValueError as e: + self.results['changed'] = False + self.fail(e) + + # create a schedule policy based on schedule_run_frequency + schedule_policy = self.recovery_services_backup_models.SimpleSchedulePolicy(schedule_run_frequency=self.schedule_run_frequency, + schedule_run_days=self.schedule_days, + schedule_run_times=schedule_run_times_as_datetimes, + schedule_weekly_frequency=self.schedule_weekly_frequency) + + daily_retention_schedule = None + weekly_retention_schedule = None + + # Daily backups can have a daily retention or weekly but Weekly backups cannot have a daily retention + if (self.daily_retention_count and self.schedule_run_frequency == "Daily"): + retention_duration = self.recovery_services_backup_models.RetentionDuration(count=self.daily_retention_count, duration_type="Days") + daily_retention_schedule = self.recovery_services_backup_models.DailyRetentionSchedule(retention_times=schedule_run_times_as_datetimes, + retention_duration=retention_duration) + + if (self.weekly_retention_count): + retention_duration = self.recovery_services_backup_models.RetentionDuration(count=self.weekly_retention_count, + duration_type="Weeks") + weekly_retention_schedule = self.recovery_services_backup_models.WeeklyRetentionSchedule(days_of_the_week=self.schedule_days, + retention_times=schedule_run_times_as_datetimes, + retention_duration=retention_duration) + + retention_policy = self.recovery_services_backup_models.LongTermRetentionPolicy(daily_schedule=daily_retention_schedule, + weekly_schedule=weekly_retention_schedule) + + policy_definition = None + + if self.backup_management_type == "AzureIaasVM": + # This assignment exists exclusively to deal with the following line being too long otherwise + AzureIaaSVMProtectionPolicy = self.recovery_services_backup_models.AzureIaaSVMProtectionPolicy + policy_definition = AzureIaaSVMProtectionPolicy(instant_rp_details=instant_rp_details, + 
schedule_policy=schedule_policy, + retention_policy=retention_policy, + instant_rp_retention_range_in_days=self.instant_recovery_snapshot_retention, + time_zone=self.time_zone) + + if policy_definition: + policy_resource = self.recovery_services_backup_models.ProtectionPolicyResource(properties=policy_definition) + response = self.recovery_services_backup_client.protection_policies.create_or_update(vault_name=self.vault_name, + resource_group_name=self.resource_group, + policy_name=self.name, + parameters=policy_resource) + + except Exception as e: + self.log('Error attempting to create the backup policy.') + self.fail("Error creating the backup policy {0} for vault {1} in resource group {2}. Error Reads: {3}".format(self.name, + self.vault_name, + self.resource_group, e)) + + return response + + def delete_backup_policy(self): + ''' + Deletes specified backup policy. + + :return: ProtectionPolicyResource + ''' + self.log("Deleting the backup policy {0} for vault {1} in resource group {2}".format(self.name, self.vault_name, self.resource_group)) + + response = None + + try: + response = self.recovery_services_backup_client.protection_policies.begin_delete(vault_name=self.vault_name, + resource_group_name=self.resource_group, + policy_name=self.name) + + except Exception as e: + self.log('Error attempting to delete the backup policy.') + self.fail("Error deleting the backup policy {0} for vault {1} in resource group {2}. Error Reads: {3}".format(self.name, + self.vault_name, + self.resource_group, e)) + + return response + + def get_backup_policy(self): + ''' + Gets the properties of the specified backup policy. 
+ + :return: ProtectionPolicyResource + ''' + self.log("Checking if the backup policy {0} for vault {1} in resource group {2} is present".format(self.name, + self.vault_name, + self.resource_group)) + + policy = None + + try: + policy = self.recovery_services_backup_client.protection_policies.get(vault_name=self.vault_name, + resource_group_name=self.resource_group, + policy_name=self.name) + except ResourceNotFoundError as ex: + self.log("Could not find backup policy {0} for vault {1} in resource group {2}".format(self.name, self.vault_name, self.resource_group)) + + return policy + + def set_results(self, policy): + if policy: + self.results['id'] = policy.id + self.results['location'] = policy.location + self.results['name'] = policy.name + self.results['type'] = policy.type + + else: + self.results['id'] = None + self.results['location'] = None + self.results['name'] = None + self.results['type'] = None + + +def main(): + """Main execution""" + AzureRMBackupPolicy() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy_info.py new file mode 100644 index 000000000..d4a086766 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backuppolicy_info.py @@ -0,0 +1,177 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Cole Neubauer, (@coleneubauer) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: azure_rm_backuppolicy_info +version_added: "1.4.0" +short_description: Get Info on Azure Backup Policy +description: + - Create and delete instance of Azure Backup Policy. 
+ +options: + vault_name: + description: + - The name of the Recovery Services Vault the policy belongs to. + required: true + type: str + name: + description: + - The name of the backup policy. + required: true + type: str + resource_group: + description: + - The name of the resource group the vault is in. + required: true + type: str +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Cole Neubauer(@coleneubauer) +''' + +EXAMPLES = ''' + - name: Get backup policy information + azure_rm_backuppolicy_info: + vault_name: Vault_Name + name: Policy_Name + resource_group: Resource_Group_Name + register: backup_policy +''' + +RETURN = ''' +id: + description: + - Id of specified backup policy. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.RecoveryServices/vaults/Vault_Name/backupPolicies/Policy_Name" +location: + description: + - Location of backup policy. + type: str + returned: always + sample: eastus +name: + description: + - Name of backup policy. + type: str + returned: always + sample: DefaultPolicy +type: + description: + - Type of backup policy. 
+ type: str + returned: always + sample: Microsoft.RecoveryServices/vaults/backupPolicies +''' + +import uuid +from datetime import datetime +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMBackupPolicyInfo(AzureRMModuleBase): + """Information class for an Azure RM Backup Policy""" + + def __init__(self): + self.module_arg_spec = dict( + vault_name=dict(type='str', required=True), + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + ) + + self.vault_name = None + self.name = None + self.resource_group = None + self.log_path = None + self.log_mode = None + + self.results = dict( + id=None, + changed=False + ) + + super(AzureRMBackupPolicyInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + existing_backup_policy = None + response = None + + existing_backup_policy = self.get_backup_policy() + + self.set_results(existing_backup_policy) + + return self.results + + def get_backup_policy(self): + ''' + Gets the properties of the specified backup policy. 
+ + :return: ProtectionPolicyResource + ''' + self.log("Checking if the backup policy {0} for vault {1} in resource group {2} is present".format(self.name, + self.vault_name, + self.resource_group)) + + policy = None + + try: + policy = self.recovery_services_backup_client.protection_policies.get(vault_name=self.vault_name, + resource_group_name=self.resource_group, + policy_name=self.name) + except ResourceNotFoundError as ex: + self.log("Could not find backup policy {0} for vault {1} in resource group {2}".format(self.name, self.vault_name, self.resource_group)) + + return policy + + def set_results(self, policy): + if policy: + self.results['id'] = policy.id + self.results['location'] = policy.location + self.results['name'] = policy.name + self.results['type'] = policy.type + + else: + self.results['id'] = None + self.results['location'] = None + self.results['name'] = None + self.results['type'] = None + + +def main(): + """Main execution""" + AzureRMBackupPolicyInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost.py new file mode 100644 index 000000000..c6b8e0d4d --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost.py @@ -0,0 +1,549 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_bastionhost + +version_added: "1.13.0" + +short_description: Managed bastion host resource + +description: + - Create, Update or Delete bastion host resource. + +options: + name: + description: + - The name of the bastion host. 
+ type: str + required: True + resource_group: + description: + - The name of the resource group. + type: str + required: True + location: + description: + - The resource location. + type: str + sku: + description: + - The sku of this Bastion Host. + type: dict + suboptions: + name: + description: + - The name of the SKU. + type: str + choices: + - Standard + - Basic + enable_tunneling: + description: + - Enable or Disable Tunneling feature of the Bastion Host resource. + type: bool + enable_shareable_link: + description: + - Enable or Disable Shareable Link of the Bastion Host resource. + type: bool + enable_ip_connect: + description: + - Enable or Disable IP Connect feature of the Bastion Host resource. + type: bool + enable_file_copy: + description: + - Enable or Disable File Copy feature of the Bastion Host resource. + type: bool + scale_units: + description: + - The scale units for the Bastion Host resource. + type: int + disable_copy_paste: + description: + - Enable or Disable Copy or Paste feature of the Bastion Host resource. + type: bool + ip_configurations: + description: + - An array of bastion host IP configurations. + type: list + elements: dict + suboptions: + name: + description: + - The name of bastion host ip configuration. + type: str + subnet: + description: + - Reference of the subnet resource. + type: dict + suboptions: + id: + description: + - The ID of the Subnet. + type: str + public_ip_address: + description: + - Reference of the PublicIP resource. + type: dict + suboptions: + id: + description: + - The ID of the public IP. + type: str + private_ip_allocation_method: + description: + - Private IP allocation method. + type: str + choices: + - Static + - Dynamic + state: + description: + - Assert the state of the pirvate link service. + - Use I(state=present) to create or update the link service and I(state=absent) to delete it. 
+ type: str + default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' +- name: Create bastion host info + azure_rm_bastionhost: + name: bastion-name + resource_group: myResourceGroup + ip_configurations: + - name: testip_configuration + subnet: + id: "{{ subnet_output.state.id }}" + public_ip_address: + id: "{{ publicIP_output.publicipaddresses[0].id }}" + private_ip_allocation_method: Dynamic + sku: + name: Standard + enable_tunneling: False + enable_shareable_link: False + enable_ip_connect: False + enable_file_copy: False + scale_units: 6 + disable_copy_paste: False + tags: + key1: value1 + +- name: Create bastion host info + azure_rm_bastionhost: + name: bastion-name + resource_group: myResourceGroup + state: absent + +''' + +RETURN = ''' +bastion_host: + description: + - List of Azure bastion host info. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the Azure bastion host. + sample: "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/bastionHosts/testbastion" + returned: always + type: str + name: + description: + - Name of the Azure bastion host. + returned: always + type: str + sample: linkservice + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + type: str + returned: always + sample: "fb0e3a90-6afa-4a01-9171-9c84d144a0f3" + type: + description: + - The resource type. + type: str + returned: always + sample: Microsoft.Network/bastionHosts + tags: + description: + - The resource tags. + type: list + returned: always + sample: { 'key1': 'value1' } + provisioning_state: + description: + - The provisioning state of the bastion host resource. 
+ type: str + returned: always + sample: Succeeded + scale_units: + description: + - The scale units for the Bastion Host resource. + type: int + returned: always + sample: 2 + enable_tunneling: + description: + - Enable/Disable Tunneling feature of the Bastion Host resource. + type: bool + returned: always + sample: False + enable_shareable_link: + description: + - Enable/Disable Shareable Link of the Bastion Host resource. + type: bool + returned: always + sample: False + enable_ip_connect: + description: + - Enable/Disable IP Connect feature of the Bastion Host resource. + type: bool + returned: always + sample: False + enable_file_copy: + description: + - Enable/Disable File Copy feature of the Bastion Host resource. + type: bool + returned: always + sample: False + dns_name: + description: + - FQDN for the endpoint on which bastion host is accessible. + type: str + returned: always + sample: bst-0ca1e1b6-9969-4167-be54-5972e1395c25.bastion.azure.com + disable_copy_paste: + description: + - Enable/Disable Copy/Paste feature of the Bastion Host resource. + type: bool + returned: always + sample: False + sku: + description: + - The sku of this Bastion Host. + type: complex + returned: always + contains: + name: + description: + - The name of this Bastion Host. + type: str + returned: always + sample: Standard + ip_configurations: + description: + - An array of bastion host IP configurations. + type: complex + returned: always + contains: + name: + description: + - Name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + returned: always + sample: IpConf + private_ip_allocation_method: + description: + - Private IP allocation method. + type: str + returned: always + sample: Static + public_ip_address: + description: + - Reference of the PublicIP resource. + type: complex + returned: always + contains: + id: + description: + - The ID of the public IP address. 
+ returned: always + type: str + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/publicIPAddresses/Myip" + subnet: + description: + - The reference to the subnet resource. + returned: always + type: str + contains: + id: + description: + - The ID of the subnet.. + returned: always + type: str + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnet/subnets/AzureBastionSubnet" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +sku_spec = dict( + name=dict(type='str', choices=['Standard', 'Basic']) +) + +subnet_spec = dict( + id=dict(type='str') +) + +public_ip_address_spec = dict( + id=dict(type='str') +) + + +class AzureRMBastionHost(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type="str", required=True), + resource_group=dict(type="str", required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + ip_configurations=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str'), + subnet=dict(type='dict', options=subnet_spec), + public_ip_address=dict(type='dict', options=public_ip_address_spec), + private_ip_allocation_method=dict(type='str', choices=['Static', 'Dynamic']) + ) + ), + sku=dict(type='dict', options=sku_spec), + enable_tunneling=dict(type='bool'), + enable_shareable_link=dict(type='bool'), + enable_ip_connect=dict(type='bool'), + enable_file_copy=dict(type='bool'), + scale_units=dict(type='int'), + disable_copy_paste=dict(type='bool') + ) + + self.name = None + self.resource_group = None + self.location = None + self.tags = None + self.state = None + self.results = dict( + changed=False, + ) + self.body = {} + + super(AzureRMBastionHost, 
self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + facts_module=False) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + old_response = self.get_item() + result = None + changed = False + + if not self.location: + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + + self.body['location'] = self.location + self.body['tags'] = self.tags + + if self.state == 'present': + if old_response: + update_tags, tags = self.update_tags(old_response['tags']) + if update_tags: + changed = True + self.body['tags'] = tags + + if self.body.get('disable_copy_paste') is not None: + if bool(self.body.get('disable_copy_paste')) != bool(old_response['disable_copy_paste']): + changed = True + else: + self.body['disable_copy_paste'] = old_response['disable_copy_paste'] + + if self.body.get('enable_file_copy') is not None: + if bool(self.body.get('enable_file_copy')) != bool(old_response['enable_file_copy']): + changed = True + else: + self.body['enable_file_copy'] = old_response['enable_file_copy'] + + if self.body.get('enable_ip_connect') is not None: + if bool(self.body.get('enable_ip_connect')) != bool(old_response['enable_ip_connect']): + changed = True + else: + self.body['enable_ip_connect'] = old_response['enable_ip_connect'] + + if self.body.get('enable_shareable_link') is not None: + if bool(self.body.get('enable_shareable_link')) != bool(old_response['enable_shareable_link']): + changed = True + else: + self.body['enable_shareable_link'] = old_response['enable_shareable_link'] + + if self.body.get('enable_tunneling') is not None: + if bool(self.body.get('enable_tunneling')) != bool(old_response['enable_tunneling']): + changed = True + else: + self.body['enable_tunneling'] = old_response['enable_tunneling'] + + if 
self.body.get('scale_units') is not None: + if self.body.get('scale_units') != old_response['scale_units']: + changed = True + else: + self.body['scale_units'] = old_response['scale_units'] + + if self.body.get('sku') is not None: + if self.body.get('sku') != old_response['sku']: + changed = True + else: + self.body['sku'] = old_response['sku'] + + if self.body.get('ip_configurations') is not None: + if self.body['ip_configurations'] != old_response['ip_configurations']: + self.fail("Bastion Host IP configuration not support to update!") + else: + self.body['ip_configurations'] = old_response['ip_configurations'] + else: + changed = True + + if changed: + if self.check_mode: + self.log("Check mode test. The bastion host is exist, will be create or updated") + else: + result = self.create_or_update(self.body) + else: + if self.check_mode: + self.log("Check mode test. The Azure Bastion Host is exist, No operation in this task") + else: + self.log("The Azure Bastion Host is exist, No operation in this task") + result = old_response + else: + if old_response: + changed = True + if self.check_mode: + self.log("Check mode test. 
The bastion host is exist, will be deleted") + else: + result = self.delete_resource() + else: + if self.check_mode: + self.log("The bastion host isn't exist, no action") + else: + self.log("The bastion host isn't exist, don't need to delete") + + self.results["bastion_host"] = result + self.results['changed'] = changed + return self.results + + def get_item(self): + self.log("Get properties for {0} in {1}".format(self.name, self.resource_group)) + try: + response = self.network_client.bastion_hosts.get(self.resource_group, self.name) + return self.bastion_to_dict(response) + except ResourceNotFoundError: + self.log("Could not get info for {0} in {1}".format(self.name, self.resource_group)) + + return [] + + def create_or_update(self, parameters): + self.log("Create or update the bastion host for {0} in {1}".format(self.name, self.resource_group)) + try: + response = self.network_client.bastion_hosts.begin_create_or_update(self.resource_group, self.name, parameters) + + result = self.network_client.bastion_hosts.get(self.resource_group, self.name) + return self.bastion_to_dict(result) + except Exception as ec: + self.fail("Create or Update {0} in {1} failed, mesage {2}".format(self.name, self.resource_group, ec)) + + return [] + + def delete_resource(self): + self.log("delete the bastion host for {0} in {1}".format(self.name, self.resource_group)) + try: + response = self.network_client.bastion_hosts.begin_delete(self.resource_group, self.name) + except Exception as ec: + self.fail("Delete {0} in {1} failed, message {2}".format(self.name, self.resource_group, ec)) + + return [] + + def bastion_to_dict(self, bastion_info): + bastion = bastion_info.as_dict() + result = dict( + id=bastion.get("id"), + name=bastion.get('name'), + type=bastion.get('type'), + etag=bastion.get('etag'), + location=bastion.get('location'), + tags=bastion.get('tags'), + sku=dict(), + ip_configurations=list(), + dns_name=bastion.get('dns_name'), + 
provisioning_state=bastion.get('provisioning_state'), + scale_units=bastion.get('scale_units'), + disable_copy_paste=bastion.get('disable_copy_paste'), + enable_file_copy=bastion.get('enable_file_copy'), + enable_ip_connect=bastion.get('enable_ip_connect'), + enable_shareable_link=bastion.get('enable_tunneling'), + enable_tunneling=bastion.get('enable_tunneling') + ) + + if bastion.get('sku'): + result['sku']['name'] = bastion['sku']['name'] + + if bastion.get('ip_configurations'): + for items in bastion['ip_configurations']: + result['ip_configurations'].append( + { + "name": items['name'], + "subnet": dict(id=items['subnet']['id']), + "public_ip_address": dict(id=items['public_ip_address']['id']), + "private_ip_allocation_method": items['private_ip_allocation_method'], + } + ) + return result + + +def main(): + AzureRMBastionHost() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost_info.py new file mode 100644 index 000000000..745358e7c --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_bastionhost_info.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_bastionhost_info + +version_added: "1.13.0" + +short_description: Get Azure bastion host info + +description: + - Get facts for Azure bastion host info. + +options: + name: + description: + - Name of the bastion host. + type: str + resource_group: + description: + - The name of the resource group. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' +- name: Get bastion host info by name + azure_rm_bastionhost_info: + name: bastion-name + resource_group: myResourceGroup + +- name: Get all bastion host by resource group + azure_rm_bastionhost_info: + resource_group: myResourceGroup + +- name: Get all bastion hoste by subscription filter by tags + azure_rm_bastionhost_info: + tags: + - key1 + - abc +''' + +RETURN = ''' +bastion_host: + description: + - List of Azure bastion host info. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the Azure bastion host. + sample: "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/bastionHosts/testbastion" + returned: always + type: str + name: + description: + - Name of the Azure bastion host. + returned: always + type: str + sample: linkservice + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + type: str + returned: always + sample: "fb0e3a90-6afa-4a01-9171-9c84d144a0f3" + type: + description: + - The resource type. + type: str + returned: always + sample: Microsoft.Network/bastionHosts + tags: + description: + - The resource tags. + type: list + returned: always + sample: { 'key1': 'value1' } + provisioning_state: + description: + - The provisioning state of the bastion host resource. + type: str + returned: always + sample: Succeeded + scale_units: + description: + - The scale units for the Bastion Host resource. + type: int + returned: always + sample: 2 + enable_tunneling: + description: + - Enable/Disable Tunneling feature of the Bastion Host resource. 
+ type: bool + returned: always + sample: False + enable_shareable_link: + description: + - Enable/Disable Shareable Link of the Bastion Host resource. + type: bool + returned: always + sample: False + enable_ip_connect: + description: + - Enable/Disable IP Connect feature of the Bastion Host resource. + type: bool + returned: always + sample: False + enable_file_copy: + description: + - Enable/Disable File Copy feature of the Bastion Host resource. + type: bool + returned: always + sample: False + dns_name: + description: + - FQDN for the endpoint on which bastion host is accessible. + type: str + returned: always + sample: bst-0ca1e1b6-9969-4167-be54-5972e1395c25.bastion.azure.com + disable_copy_paste: + description: + - Enable/Disable Copy/Paste feature of the Bastion Host resource. + type: bool + returned: always + sample: False + sku: + description: + - The sku of this Bastion Host. + type: complex + returned: always + contains: + name: + description: + - The name of this Bastion Host. + type: str + returned: always + sample: Standard + ip_configurations: + description: + - An array of bastion host IP configurations. + type: complex + returned: always + contains: + name: + description: + - Name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + returned: always + sample: IpConf + private_ip_allocation_method: + description: + - Private IP allocation method. + type: str + returned: always + sample: Static + public_ip_address: + description: + - Reference of the PublicIP resource. + type: complex + returned: always + contains: + id: + description: + - The ID of the public IP address. + returned: always + type: str + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/publicIPAddresses/Myip" + subnet: + description: + - The reference to the subnet resource. + returned: always + type: complex + contains: + id: + description: + - The ID of the subnet.. 
+ returned: always + type: str + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnet/subnets/AzureBastionSubnet" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMBastionHostInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type="str"), + resource_group=dict(type="str"), + tags=dict(type='list', elements='str') + ) + + self.name = None + self.tags = None + self.resource_group = None + self.results = dict( + changed=False, + ) + + super(AzureRMBastionHostInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + if self.name is not None and self.resource_group is not None: + result = self.get_item() + elif self.resource_group is not None: + result = self.list_resourcegroup() + else: + result = self.list_by_subscription() + + self.results["bastion_host"] = [item for item in result if item and self.has_tags(item['tags'], self.tags)] + return self.results + + def get_item(self): + self.log("Get properties for {0} in {1}".format(self.name, self.resource_group)) + + try: + response = self.network_client.bastion_hosts.get(self.resource_group, self.name) + return [self.bastion_to_dict(response)] + except ResourceNotFoundError: + self.log("Could not get info for {0} in {1}".format(self.name, self.resource_group)) + + return [] + + def list_resourcegroup(self): + result = [] + self.log("List all in {0}".format(self.resource_group)) + try: + response = self.network_client.bastion_hosts.list_by_resource_group(self.resource_group) + while True: + result.append(response.next()) + except StopIteration: + 
pass + except Exception: + pass + return [self.bastion_to_dict(item) for item in result] + + def list_by_subscription(self): + result = [] + self.log("List all in by subscription") + try: + response = self.network_client.bastion_hosts.list() + while True: + result.append(response.next()) + except StopIteration: + pass + except Exception: + pass + return [self.bastion_to_dict(item) for item in result] + + def bastion_to_dict(self, bastion_info): + bastion = bastion_info.as_dict() + result = dict( + id=bastion.get("id"), + name=bastion.get('name'), + type=bastion.get('type'), + etag=bastion.get('etag'), + location=bastion.get('location'), + tags=bastion.get('tags'), + sku=dict(), + ip_configurations=list(), + dns_name=bastion.get('dns_name'), + provisioning_state=bastion.get('provisioning_state'), + scale_units=bastion.get('scale_units'), + disable_copy_paste=bastion.get('disable_copy_paste'), + enable_file_copy=bastion.get('enable_file_copy'), + enable_ip_connect=bastion.get('enable_ip_connect'), + enable_shareable_link=bastion.get('enable_tunneling'), + enable_tunneling=bastion.get('enable_tunneling') + ) + + if bastion.get('sku'): + result['sku']['name'] = bastion['sku']['name'] + + if bastion.get('ip_configurations'): + for items in bastion['ip_configurations']: + result['ip_configurations'].append( + { + "name": items['name'], + "subnet": dict(id=items['subnet']['id']), + "public_ip_address": dict(id=items['public_ip_address']['id']), + "private_ip_allocation_method": items['private_ip_allocation_method'], + } + ) + return result + + +def main(): + AzureRMBastionHostInfo() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_batchaccount.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_batchaccount.py new file mode 100644 index 000000000..afb4d3d51 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_batchaccount.py @@ -0,0 +1,337 @@ 
+#!/usr/bin/python +# +# Copyright (C) 2019 Junyi Yi (@JunyiYi) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# +# ---------------------------------------------------------------------------- + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_batchaccount +version_added: "0.1.2" +short_description: Manages a Batch Account on Azure +description: + - Create, update and delete instance of Azure Batch Account. + +options: + resource_group: + description: + - The name of the resource group in which to create the Batch Account. + required: true + type: str + name: + description: + - The name of the Batch Account. + required: true + type: str + location: + description: + - Specifies the supported Azure location where the resource exists. + type: str + auto_storage_account: + description: + - Existing storage account with which to associate the Batch Account. + - It can be the storage account name which is in the same resource group. + - It can be the storage account ID. Fox example "/subscriptions/{subscription_id}/resourceGroups/ + {resource_group}/providers/Microsoft.Storage/storageAccounts/{name}". + - It can be a dict which contains I(name) and I(resource_group) of the storage account. + key_vault: + description: + - Existing key vault with which to associate the Batch Account. + - It can be the key vault name which is in the same resource group. + - It can be the key vault ID. 
          For example "/subscriptions/{subscription_id}/resourceGroups/
          {resource_group}/providers/Microsoft.KeyVault/vaults/{name}".
        - It can be a dict which contains I(name) and I(resource_group) of the key vault.
    pool_allocation_mode:
        description:
            - The pool allocation mode of the Batch Account.
        default: batch_service
        choices:
            - batch_service
            - user_subscription
        type: str
    state:
        description:
            - Assert the state of the Batch Account.
            - Use C(present) to create or update a Batch Account and C(absent) to delete it.
        default: present
        type: str
        choices:
            - present
            - absent

extends_documentation_fragment:
    - azure.azcollection.azure
    - azure.azcollection.azure_tags

author:
    - Junyi Yi (@JunyiYi)
'''

EXAMPLES = '''
  - name: Create Batch Account
    azure_rm_batchaccount:
      resource_group: MyResGroup
      name: mybatchaccount
      location: eastus
      auto_storage_account:
        name: mystorageaccountname
      pool_allocation_mode: batch_service
'''

RETURN = '''
id:
    description:
        - The ID of the Batch account.
    returned: always
    type: str
    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Batch/batchAccounts/sampleacct"
account_endpoint:
    description:
        - The account endpoint used to interact with the Batch service.
+ returned: always + type: str + sample: sampleacct.westus.batch.azure.com +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import normalize_location_name +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.polling import LROPoller + from msrestazure.azure_operation import AzureOperationPoller + from msrest.serialization import Model + from azure.mgmt.batch import BatchManagementClient +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMBatchAccount(AzureRMModuleBaseExt): + """Configuration class for an Azure RM Batch Account resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + required=True, + type='str' + ), + name=dict( + required=True, + type='str' + ), + location=dict( + type='str', + updatable=False, + disposition='/' + ), + auto_storage_account=dict( + type='raw' + ), + key_vault=dict( + type='raw', + no_log=True, + updatable=False, + disposition='/' + ), + pool_allocation_mode=dict( + default='batch_service', + type='str', + choices=['batch_service', 'user_subscription'], + updatable=False, + disposition='/' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.batch_account = dict() + self.tags = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMBatchAccount, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in 
list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.batch_account[key] = kwargs[key] + + resource_group = self.get_resource_group(self.resource_group) + if self.batch_account.get('location') is None: + self.batch_account['location'] = resource_group.location + if self.batch_account.get('auto_storage_account') is not None: + self.batch_account['auto_storage'] = { + 'storage_account_id': self.normalize_resource_id( + self.batch_account.pop('auto_storage_account'), + '/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Storage/storageAccounts/{name}') + } + if self.batch_account.get('key_vault') is not None: + id = self.normalize_resource_id( + self.batch_account.pop('key_vault'), + '/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.KeyVault/vaults/{name}') + url = 'https://' + id.split('/').pop() + '.vault.azure.net/' + self.batch_account['key_vault_reference'] = { + 'id': id, + 'url': url + } + self.batch_account['pool_allocation_mode'] = _snake_to_camel(self.batch_account['pool_allocation_mode'], True) + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(BatchManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + old_response = self.get_batchaccount() + + if not old_response: + self.log("Batch Account instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Batch Account instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.results['old'] = old_response + self.results['new'] = self.batch_account + if not self.idempotency_check(old_response, self.batch_account): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the 
Batch Account instance") + + self.results['changed'] = True + if self.check_mode: + return self.results + + response = self.create_update_batchaccount() + + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Batch Account instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_batchaccount() + else: + self.log("Batch Account instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None), + 'account_endpoint': response.get('account_endpoint', None) + }) + return self.results + + def create_update_batchaccount(self): + ''' + Creates or updates Batch Account with the specified configuration. + + :return: deserialized Batch Account instance state dictionary + ''' + self.log("Creating / Updating the Batch Account instance {0}".format(self.name)) + + try: + if self.to_do == Actions.Create: + response = self.mgmt_client.batch_account.create(resource_group_name=self.resource_group, + account_name=self.name, + parameters=self.batch_account) + else: + response = self.mgmt_client.batch_account.update(resource_group_name=self.resource_group, + account_name=self.name, + tags=self.tags, + auto_storage=self.batch_account.get('auto_storage')) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + except CloudError as exc: + self.log('Error attempting to create the Batch Account instance.') + self.fail("Error creating the Batch Account instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_batchaccount(self): + ''' + Deletes specified Batch Account instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the Batch Account instance {0}".format(self.name)) + try: + response = self.mgmt_client.batch_account.delete(resource_group_name=self.resource_group, + account_name=self.name) + except CloudError as e: + self.log('Error attempting to delete the Batch Account instance.') + self.fail("Error deleting the Batch Account instance: {0}".format(str(e))) + + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + return True + + def get_batchaccount(self): + ''' + Gets the properties of the specified Batch Account + :return: deserialized Batch Account instance state dictionary + ''' + self.log("Checking if the Batch Account instance {0} is present".format(self.name)) + found = False + try: + response = self.mgmt_client.batch_account.get(resource_group_name=self.resource_group, + account_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("Batch Account instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the Batch Account instance.') + if found is True: + return response.as_dict() + return False + + +def main(): + """Main execution""" + AzureRMBatchAccount() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint.py new file mode 100644 index 000000000..c289ad0fe --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint.py @@ -0,0 +1,664 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, , Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_cdnendpoint +version_added: "0.1.2" +short_description: 
Manage a Azure CDN endpoint +description: + - Create, update, start, stop and delete a Azure CDN endpoint. + +options: + resource_group: + description: + - Name of a resource group where the Azure CDN endpoint exists or will be created. + required: true + name: + description: + - Name of the Azure CDN endpoint. + required: true + location: + description: + - Valid azure location. Defaults to location of the resource group. + started: + description: + - Use with I(state=present) to start the endpoint. + type: bool + purge: + description: + - Use with I(state=present) to purge the endpoint. + type: bool + purge_content_paths: + description: + - Use with I(state=present) and I(purge=true) to specify content paths to be purged. + type: list + elements: str + default: ['/'] + profile_name: + description: + - Name of the CDN profile where the endpoint attached to. + required: true + origins: + description: + - Set of source of the content being delivered via CDN. + suboptions: + name: + description: + - Origin name. + required: true + host_name: + description: + - The address of the origin. + - It can be a domain name, IPv4 address, or IPv6 address. + required: true + http_port: + description: + - The value of the HTTP port. Must be between C(1) and C(65535). + type: int + https_port: + description: + - The value of the HTTPS port. Must be between C(1) and C(65535). + type: int + required: true + origin_host_header: + description: + - The host header value sent to the origin with each request. + type: str + origin_path: + description: + - A directory path on the origin that CDN can use to retrieve content from. + - E.g. contoso.cloudapp.net/originpath. + type: str + content_types_to_compress: + description: + - List of content types on which compression applies. + - This value should be a valid MIME type. + type: list + elements: str + is_compression_enabled: + description: + - Indicates whether content compression is enabled on CDN. 
+ type: bool + default: false + is_http_allowed: + description: + - Indicates whether HTTP traffic is allowed on the endpoint. + type: bool + default: true + is_https_allowed: + description: + - Indicates whether HTTPS traffic is allowed on the endpoint. + type: bool + default: true + query_string_caching_behavior: + description: + - Defines how CDN caches requests that include query strings. + type: str + choices: + - ignore_query_string + - bypass_caching + - use_query_string + - not_set + default: ignore_query_string + state: + description: + - Assert the state of the Azure CDN endpoint. Use C(present) to create or update a Azure CDN endpoint and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Create a Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: myResourceGroup + profile_name: myProfile + name: myEndpoint + origins: + - name: TestOrig + host_name: "www.example.com" + tags: + testing: testing + delete: on-exit + foo: bar + - name: Delete a Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: myResourceGroup + profile_name: myProfile + name: myEndpoint + state: absent +''' +RETURN = ''' +state: + description: Current state of the Azure CDN endpoint. + returned: always + type: str +id: + description: + - Id of the CDN endpoint. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myProfile/endpoints/ + myEndpoint" +host_name: + description: + - Host name of the CDN endpoint. 
+ returned: always + type: str + sample: "myendpoint.azureedge.net" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.mgmt.cdn.models import Endpoint, DeepCreatedOrigin, EndpointUpdateParameters, QueryStringCachingBehavior + from azure.mgmt.cdn import CdnManagementClient +except ImportError: + # This is handled in azure_rm_common + pass + + +def cdnendpoint_to_dict(cdnendpoint): + return dict( + id=cdnendpoint.id, + name=cdnendpoint.name, + type=cdnendpoint.type, + location=cdnendpoint.location, + tags=cdnendpoint.tags, + origin_host_header=cdnendpoint.origin_host_header, + origin_path=cdnendpoint.origin_path, + content_types_to_compress=cdnendpoint.content_types_to_compress, + is_compression_enabled=cdnendpoint.is_compression_enabled, + is_http_allowed=cdnendpoint.is_http_allowed, + is_https_allowed=cdnendpoint.is_https_allowed, + query_string_caching_behavior=cdnendpoint.query_string_caching_behavior, + optimization_type=cdnendpoint.optimization_type, + probe_path=cdnendpoint.probe_path, + geo_filters=[geo_filter_to_dict(geo_filter) for geo_filter in cdnendpoint.geo_filters] if cdnendpoint.geo_filters else None, + host_name=cdnendpoint.host_name, + origins=[deep_created_origin_to_dict(origin) for origin in cdnendpoint.origins] if cdnendpoint.origins else None, + resource_state=cdnendpoint.resource_state, + provisioning_state=cdnendpoint.provisioning_state + ) + + +def deep_created_origin_to_dict(origin): + return dict( + name=origin.name, + host_name=origin.host_name, + http_port=origin.http_port, + https_port=origin.https_port, + ) + + +def geo_filter_to_dict(geo_filter): + return dict( + relative_path=geo_filter.relative_path, + action=geo_filter.action, + country_codes=geo_filter.country_codes, + ) + + +def default_content_types(): + return ["text/plain", + "text/html", + "text/css", + 
"text/javascript", + "application/x-javascript", + "application/javascript", + "application/json", + "application/xml"] + + +origin_spec = dict( + name=dict( + type='str', + required=True + ), + host_name=dict( + type='str', + required=True + ), + http_port=dict( + type='int' + ), + https_port=dict( + type='int' + ) +) + + +class AzureRMCdnendpoint(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + started=dict( + type='bool' + ), + purge=dict( + type='bool' + ), + purge_content_paths=dict( + type='list', + elements='str', + default=['/'] + ), + profile_name=dict( + type='str', + required=True + ), + origins=dict( + type='list', + elements='dict', + options=origin_spec + ), + origin_host_header=dict( + type='str', + ), + origin_path=dict( + type='str', + ), + content_types_to_compress=dict( + type='list', + elements='str', + ), + is_compression_enabled=dict( + type='bool', + default=False + ), + is_http_allowed=dict( + type='bool', + default=True + ), + is_https_allowed=dict( + type='bool', + default=True + ), + query_string_caching_behavior=dict( + type='str', + choices=[ + 'ignore_query_string', + 'bypass_caching', + 'use_query_string', + 'not_set' + ], + default='ignore_query_string' + ), + ) + + self.resource_group = None + self.name = None + self.state = None + self.started = None + self.purge = None + self.purge_content_paths = None + self.location = None + self.profile_name = None + self.origins = None + self.tags = None + self.origin_host_header = None + self.origin_path = None + self.content_types_to_compress = None + self.is_compression_enabled = None + self.is_http_allowed = None + self.is_https_allowed = None + self.query_string_caching_behavior = None + + self.cdn_client = None + + self.results = 
dict(changed=False) + + super(AzureRMCdnendpoint, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + self.cdn_client = self.get_cdn_client() + + to_be_updated = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + if self.query_string_caching_behavior: + self.query_string_caching_behavior = _snake_to_camel(self.query_string_caching_behavior, capitalize_first=True) + + response = self.get_cdnendpoint() + + if self.state == 'present': + + if not response: + + if self.started is None: + # If endpoint doesn't exist and no start/stop operation specified, create endpoint. + if self.origins is None: + self.fail("Origins is not provided when trying to create endpoint") + self.log("Need to create the Azure CDN endpoint") + + if not self.check_mode: + result = self.create_cdnendpoint() + self.results['id'] = result['id'] + self.results['host_name'] = result['host_name'] + self.log("Creation done") + + self.results['changed'] = True + return self.results + + else: + # Fail the module when user try to start/stop a non-existed endpoint + self.log("Can't stop/stop a non-existed endpoint") + self.fail("This endpoint is not found, stop/start is forbidden") + + else: + self.log('Results : {0}'.format(response)) + self.results['id'] = response['id'] + self.results['host_name'] = response['host_name'] + + update_tags, response['tags'] = self.update_tags(response['tags']) + + if update_tags: + to_be_updated = True + + if response['provisioning_state'] == "Succeeded": + if self.started is False and response['resource_state'] == 'Running': + self.log("Need to stop the Azure CDN endpoint") + + if not self.check_mode: + result = self.stop_cdnendpoint() + self.log("Endpoint 
stopped") + + self.results['changed'] = True + + elif self.started and response['resource_state'] == 'Stopped': + self.log("Need to start the Azure CDN endpoint") + + if not self.check_mode: + result = self.start_cdnendpoint() + self.log("Endpoint started") + + self.results['changed'] = True + + elif self.started is not None: + self.module.warn("Start/Stop not performed due to current resource state {0}".format(response['resource_state'])) + self.results['changed'] = False + + if self.purge: + self.log("Need to purge endpoint") + + if not self.check_mode: + result = self.purge_cdnendpoint() + self.log("Endpoint purged") + + self.results['changed'] = True + + to_be_updated = to_be_updated or self.check_update(response) + + if to_be_updated: + self.log("Need to update the Azure CDN endpoint") + self.results['changed'] = True + + if not self.check_mode: + result = self.update_cdnendpoint() + self.results['host_name'] = result['host_name'] + self.log("Update done") + + elif self.started is not None: + self.module.warn("Start/Stop not performed due to current provisioning state {0}".format(response['provisioning_state'])) + self.results['changed'] = False + + elif self.state == 'absent' and response: + self.log("Need to delete the Azure CDN endpoint") + self.results['changed'] = True + + if not self.check_mode: + self.delete_cdnendpoint() + self.log("Azure CDN endpoint deleted") + + return self.results + + def create_cdnendpoint(self): + ''' + Creates a Azure CDN endpoint. 
+ + :return: deserialized Azure CDN endpoint instance state dictionary + ''' + self.log("Creating the Azure CDN endpoint instance {0}".format(self.name)) + + origins = [] + for item in self.origins: + origins.append( + DeepCreatedOrigin(name=item['name'], + host_name=item['host_name'], + http_port=item['http_port'] if 'http_port' in item else None, + https_port=item['https_port'] if 'https_port' in item else None) + ) + + parameters = Endpoint( + origins=origins, + location=self.location, + tags=self.tags, + origin_host_header=self.origin_host_header, + origin_path=self.origin_path, + content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress + else self.content_types_to_compress, + is_compression_enabled=self.is_compression_enabled if self.is_compression_enabled is not None else False, + is_http_allowed=self.is_http_allowed if self.is_http_allowed is not None else True, + is_https_allowed=self.is_https_allowed if self.is_https_allowed is not None else True, + query_string_caching_behavior=self.query_string_caching_behavior if self.query_string_caching_behavior + else QueryStringCachingBehavior.ignore_query_string + ) + + try: + poller = self.cdn_client.endpoints.begin_create(self.resource_group, self.profile_name, self.name, parameters) + response = self.get_poller_result(poller) + return cdnendpoint_to_dict(response) + except Exception as exc: + self.log('Error attempting to create Azure CDN endpoint instance.') + self.fail("Error creating Azure CDN endpoint instance: {0}".format(exc.message)) + + def update_cdnendpoint(self): + ''' + Updates a Azure CDN endpoint. 
+ + :return: deserialized Azure CDN endpoint instance state dictionary + ''' + self.log("Updating the Azure CDN endpoint instance {0}".format(self.name)) + + endpoint_update_properties = EndpointUpdateParameters( + tags=self.tags, + origin_host_header=self.origin_host_header, + origin_path=self.origin_path, + content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress + else self.content_types_to_compress, + is_compression_enabled=self.is_compression_enabled, + is_http_allowed=self.is_http_allowed, + is_https_allowed=self.is_https_allowed, + query_string_caching_behavior=self.query_string_caching_behavior, + ) + + try: + poller = self.cdn_client.endpoints.begin_update(self.resource_group, self.profile_name, self.name, endpoint_update_properties) + response = self.get_poller_result(poller) + return cdnendpoint_to_dict(response) + except Exception as exc: + self.log('Error attempting to update Azure CDN endpoint instance.') + self.fail("Error updating Azure CDN endpoint instance: {0}".format(exc.message)) + + def delete_cdnendpoint(self): + ''' + Deletes the specified Azure CDN endpoint in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Azure CDN endpoint {0}".format(self.name)) + try: + poller = self.cdn_client.endpoints.begin_delete( + self.resource_group, self.profile_name, self.name) + self.get_poller_result(poller) + return True + except Exception as e: + self.log('Error attempting to delete the Azure CDN endpoint.') + self.fail("Error deleting the Azure CDN endpoint: {0}".format(e.message)) + return False + + def get_cdnendpoint(self): + ''' + Gets the properties of the specified Azure CDN endpoint. 
+ + :return: deserialized Azure CDN endpoint state dictionary + ''' + self.log( + "Checking if the Azure CDN endpoint {0} is present".format(self.name)) + try: + response = self.cdn_client.endpoints.get(self.resource_group, self.profile_name, self.name) + self.log("Response : {0}".format(response)) + self.log("Azure CDN endpoint : {0} found".format(response.name)) + return cdnendpoint_to_dict(response) + except Exception: + self.log('Did not find the Azure CDN endpoint.') + return False + + def start_cdnendpoint(self): + ''' + Starts an existing Azure CDN endpoint that is on a stopped state. + + :return: deserialized Azure CDN endpoint state dictionary + ''' + self.log( + "Starting the Azure CDN endpoint {0}".format(self.name)) + try: + poller = self.cdn_client.endpoints.begin_start(self.resource_group, self.profile_name, self.name) + response = self.get_poller_result(poller) + self.log("Response : {0}".format(response)) + self.log("Azure CDN endpoint : {0} started".format(response.name)) + return self.get_cdnendpoint() + except Exception: + self.log('Fail to start the Azure CDN endpoint.') + return False + + def purge_cdnendpoint(self): + ''' + Purges an existing Azure CDN endpoint. + + :return: deserialized Azure CDN endpoint state dictionary + ''' + self.log( + "Purging the Azure CDN endpoint {0}".format(self.name)) + try: + poller = self.cdn_client.endpoints.begin_purge_content(self.resource_group, + self.profile_name, + self.name, + content_file_paths=dict(content_paths=self.purge_content_paths)) + response = self.get_poller_result(poller) + self.log("Response : {0}".format(response)) + return self.get_cdnendpoint() + except Exception as e: + self.fail('Fail to purge the Azure CDN endpoint.') + return False + + def stop_cdnendpoint(self): + ''' + Stops an existing Azure CDN endpoint that is on a running state. 
+ + :return: deserialized Azure CDN endpoint state dictionary + ''' + self.log( + "Stopping the Azure CDN endpoint {0}".format(self.name)) + try: + poller = self.cdn_client.endpoints.begin_stop(self.resource_group, self.profile_name, self.name) + response = self.get_poller_result(poller) + self.log("Response : {0}".format(response)) + self.log("Azure CDN endpoint : {0} stopped".format(response.name)) + return self.get_cdnendpoint() + except Exception: + self.log('Fail to stop the Azure CDN endpoint.') + return False + + def check_update(self, response): + + if self.origin_host_header and response['origin_host_header'] != self.origin_host_header: + self.log("Origin host header Diff - Origin {0} / Update {1}".format(response['origin_host_header'], self.origin_host_header)) + return True + + if self.origin_path and response['origin_path'] != self.origin_path: + self.log("Origin path Diff - Origin {0} / Update {1}".format(response['origin_path'], self.origin_path)) + return True + + if self.content_types_to_compress and response['content_types_to_compress'] != self.content_types_to_compress: + self.log("Content types to compress Diff - Origin {0} / Update {1}".format(response['content_types_to_compress'], self.content_types_to_compress)) + return True + + if self.is_compression_enabled is not None and response['is_compression_enabled'] != self.is_compression_enabled: + self.log("is_compression_enabled Diff - Origin {0} / Update {1}".format(response['is_compression_enabled'], self.is_compression_enabled)) + return True + + if self.is_http_allowed is not None and response['is_http_allowed'] != self.is_http_allowed: + self.log("is_http_allowed Diff - Origin {0} / Update {1}".format(response['is_http_allowed'], self.is_http_allowed)) + return True + + if self.is_https_allowed is not None and response['is_https_allowed'] != self.is_https_allowed: + self.log("is_https_allowed Diff - Origin {0} / Update {1}".format(response['is_https_allowed'], self.is_https_allowed)) + 
return True + + if self.query_string_caching_behavior and \ + _snake_to_camel(response['query_string_caching_behavior']).lower() != _snake_to_camel(self.query_string_caching_behavior).lower(): + self.log("query_string_caching_behavior Diff - Origin {0} / Update {1}".format(response['query_string_caching_behavior'], + self.query_string_caching_behavior)) + return True + + return False + + def get_cdn_client(self): + if not self.cdn_client: + self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2017-04-02') + return self.cdn_client + + +def main(): + """Main execution""" + AzureRMCdnendpoint() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint_info.py new file mode 100644 index 000000000..897071eb9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnendpoint_info.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Hai Cao, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_cdnendpoint_info + +version_added: "0.1.2" + +short_description: Get Azure CDN endpoint facts + +description: + - Get facts for a specific Azure CDN endpoint or all Azure CDN endpoints. + +options: + resource_group: + description: + - Name of resource group where this CDN profile belongs to. + required: true + profile_name: + description: + - Name of CDN profile. + required: true + name: + description: + - Limit results to a specific Azure CDN endpoint. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Hai Cao (@caohai) + - Yunge zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get facts for all endpoints in CDN profile + azure_rm_cdnendpoint_info: + resource_group: myResourceGroup + profile_name: myCDNProfile + tags: + - key + - key:value + + - name: Get facts of specific CDN endpoint + azure_rm_cdnendpoint_info: + resource_group: myResourceGroup + profile_name: myCDNProfile + name: myEndpoint1 +''' + +RETURN = ''' +cdnendpoints: + description: List of Azure CDN endpoints. + returned: always + type: complex + contains: + resource_group: + description: + - Name of a resource group where the Azure CDN endpoint exists. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Name of the Azure CDN endpoint. + returned: always + type: str + sample: myEndpoint + profile_name: + description: + - Name of the Azure CDN profile that this endpoint is attached to. + returned: always + type: str + sample: myProfile + location: + description: + - Location of the Azure CDN endpoint. + type: str + sample: WestUS + id: + description: + - ID of the Azure CDN endpoint. + type: str + sample: + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myCDN/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint1" + provisioning_state: + description: + - Provisioning status of the Azure CDN endpoint. + type: str + sample: Succeeded + resource_state: + description: + - Resource status of the profile. + type: str + sample: Running + is_compression_enabled: + description: + - Indicates whether content compression is enabled on CDN. + type: bool + sample: true + is_http_allowed: + description: + - Indicates whether HTTP traffic is allowed on the endpoint. + type: bool + sample: true + is_https_allowed: + description: + - Indicates whether HTTPS traffic is allowed on the endpoint. 
+ type: bool + sample: true + query_string_caching_behavior: + description: + - Defines how CDN caches requests that include query strings. + type: str + sample: IgnoreQueryString + content_types_to_compress: + description: + - List of content types on which compression applies. + type: list + sample: [ + "text/plain", + "text/html", + "text/css", + "text/javascript", + "application/x-javascript", + "application/javascript", + "application/json", + "application/xml" + ] + origins: + description: + - The source of the content being delivered via CDN. + sample: { + "host_name": "xxxxxxxx.blob.core.windows.net", + "http_port": null, + "https_port": null, + "name": "xxxxxxxx-blob-core-windows-net" + } + origin_host_header: + description: + - The host header value sent to the origin with each request. + type: str + sample: xxxxxxxx.blob.core.windows.net + origin_path: + description: + - A directory path on the origin that CDN can use to retrieve content from. + type: str + sample: /pic/ + tags: + description: + - The tags of the Azure CDN endpoint. 
+ type: list + sample: foo +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.cdn import CdnManagementClient + from azure.mgmt.cdn.models import ErrorResponseException + from azure.common import AzureHttpError +except ImportError: + # handled in azure_rm_common + pass + +import re + +AZURE_OBJECT_CLASS = 'endpoints' + + +class AzureRMCdnEndpointInfo(AzureRMModuleBase): + """Utility class to get Azure Azure CDN endpoint facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict( + type='str', + required=True + ), + profile_name=dict( + type='str', + required=True + ), + tags=dict( + type='list', + elements='str' + ) + ) + + self.results = dict( + changed=False, + cdnendpoints=[] + ) + + self.name = None + self.resource_group = None + self.profile_name = None + self.tags = None + + super(AzureRMCdnEndpointInfo, self).__init__( + supports_check_mode=True, + derived_arg_spec=self.module_args, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_cdnendpoint_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_cdnendpoint_facts' module has been renamed to 'azure_rm_cdnendpoint_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2017-04-02') + + if self.name: + self.results['cdnendpoints'] = self.get_item() + else: + self.results['cdnendpoints'] = self.list_by_profile() + + return self.results + + def get_item(self): + """Get a single Azure Azure CDN endpoint""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.cdn_client.endpoints.get( + self.resource_group, self.profile_name, 
self.name) + except Exception: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_cdnendpoint(item)] + + return result + + def list_by_profile(self): + """Get all Azure Azure CDN endpoints within an Azure CDN profile""" + + self.log('List all Azure CDN endpoints within an Azure CDN profile') + + try: + response = self.cdn_client.endpoints.list_by_profile( + self.resource_group, self.profile_name) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_cdnendpoint(item)) + + return results + + def serialize_cdnendpoint(self, cdnendpoint): + ''' + Convert a Azure CDN endpoint object to dict. + :param cdn: Azure CDN endpoint object + :return: dict + ''' + result = self.serialize_obj(cdnendpoint, AZURE_OBJECT_CLASS) + + new_result = {} + new_result['id'] = cdnendpoint.id + new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id'])) + new_result['profile_name'] = re.sub('\\/.*', '', re.sub('.*profiles\\/', '', result['id'])) + new_result['name'] = cdnendpoint.name + new_result['type'] = cdnendpoint.type + new_result['location'] = cdnendpoint.location + new_result['resource_state'] = cdnendpoint.resource_state + new_result['provisioning_state'] = cdnendpoint.provisioning_state + new_result['query_string_caching_behavior'] = cdnendpoint.query_string_caching_behavior + new_result['is_compression_enabled'] = cdnendpoint.is_compression_enabled + new_result['is_http_allowed'] = cdnendpoint.is_http_allowed + new_result['is_https_allowed'] = cdnendpoint.is_https_allowed + new_result['content_types_to_compress'] = cdnendpoint.content_types_to_compress + new_result['origin_host_header'] = cdnendpoint.origin_host_header + new_result['origin_path'] = cdnendpoint.origin_path + new_result['origin'] = dict( + name=cdnendpoint.origins[0].name, + 
host_name=cdnendpoint.origins[0].host_name, + http_port=cdnendpoint.origins[0].http_port, + https_port=cdnendpoint.origins[0].https_port + ) + new_result['tags'] = cdnendpoint.tags + return new_result + + +def main(): + """Main module execution code path""" + + AzureRMCdnEndpointInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile.py new file mode 100644 index 000000000..012a9cba8 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, , Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_cdnprofile +version_added: "0.1.2" +short_description: Manage a Azure CDN profile +description: + - Create, update and delete a Azure CDN profile. + +options: + resource_group: + description: + - Name of a resource group where the CDN profile exists or will be created. + required: true + name: + description: + - Name of the CDN profile. + required: true + location: + description: + - Valid Azure location. Defaults to location of the resource group. + sku: + description: + - The pricing tier, defines a CDN provider, feature list and rate of the CDN profile. + - Detailed pricing can be find at U(https://azure.microsoft.com/en-us/pricing/details/cdn/). + choices: + - standard_verizon + - premium_verizon + - custom_verizon + - standard_akamai + - standard_chinacdn + - standard_microsoft + state: + description: + - Assert the state of the CDN profile. Use C(present) to create or update a CDN profile and C(absent) to delete it. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Hai Cao (@caohai) + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Create a CDN profile + azure_rm_cdnprofile: + resource_group: myResourceGroup + name: myCDN + sku: standard_akamai + tags: + testing: testing + + - name: Delete the CDN profile + azure_rm_cdnprofile: + resource_group: myResourceGroup + name: myCDN + state: absent +''' +RETURN = ''' +id: + description: Current state of the CDN profile. + returned: always + type: dict + example: + id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myCDN +''' +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +import uuid + +try: + from azure.mgmt.cdn.models import Profile, Sku + from azure.mgmt.cdn import CdnManagementClient +except ImportError as ec: + # This is handled in azure_rm_common + pass + + +def cdnprofile_to_dict(cdnprofile): + return dict( + id=cdnprofile.id, + name=cdnprofile.name, + type=cdnprofile.type, + location=cdnprofile.location, + sku=cdnprofile.sku.name, + resource_state=cdnprofile.resource_state, + provisioning_state=cdnprofile.provisioning_state, + tags=cdnprofile.tags + ) + + +class AzureRMCdnprofile(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + sku=dict( + type='str', + choices=['standard_verizon', 'premium_verizon', 'custom_verizon', 'standard_akamai', 'standard_chinacdn', 'standard_microsoft'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.state = None + self.tags = None + self.sku = None + + 
self.cdn_client = None + + required_if = [ + ('state', 'present', ['sku']) + ] + + self.results = dict(changed=False) + + super(AzureRMCdnprofile, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + self.cdn_client = self.get_cdn_client() + + to_be_updated = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + response = self.get_cdnprofile() + + if self.state == 'present': + + if not response: + self.log("Need to create the CDN profile") + + if not self.check_mode: + new_response = self.create_cdnprofile() + self.results['id'] = new_response['id'] + + self.results['changed'] = True + + else: + self.log('Results : {0}'.format(response)) + update_tags, response['tags'] = self.update_tags(response['tags']) + + if response['provisioning_state'] == "Succeeded": + if update_tags: + to_be_updated = True + + if to_be_updated: + self.log("Need to update the CDN profile") + + if not self.check_mode: + new_response = self.update_cdnprofile() + self.results['id'] = new_response['id'] + + self.results['changed'] = True + + elif self.state == 'absent': + if not response: + self.fail("CDN profile {0} not exists.".format(self.name)) + else: + self.log("Need to delete the CDN profile") + self.results['changed'] = True + + if not self.check_mode: + self.delete_cdnprofile() + self.results['id'] = response['id'] + + return self.results + + def create_cdnprofile(self): + ''' + Creates a Azure CDN profile. 
+ + :return: deserialized Azure CDN profile instance state dictionary + ''' + self.log("Creating the Azure CDN profile instance {0}".format(self.name)) + + parameters = Profile( + location=self.location, + sku=Sku(name=self.sku), + tags=self.tags + ) + + xid = str(uuid.uuid1()) + + try: + poller = self.cdn_client.profiles.begin_create(self.resource_group, + self.name, + parameters) + response = self.get_poller_result(poller) + return cdnprofile_to_dict(response) + except Exception as exc: + self.log('Error attempting to create Azure CDN profile instance.') + self.fail("Error Creating Azure CDN profile instance: {0}".format(exc.message)) + + def update_cdnprofile(self): + ''' + Updates a Azure CDN profile. + + :return: deserialized Azure CDN profile instance state dictionary + ''' + self.log("Updating the Azure CDN profile instance {0}".format(self.name)) + + try: + poller = self.cdn_client.profiles.begin_update(self.resource_group, self.name, {'tags': self.tags}) + response = self.get_poller_result(poller) + return cdnprofile_to_dict(response) + except Exception as exc: + self.log('Error attempting to update Azure CDN profile instance.') + self.fail("Error updating Azure CDN profile instance: {0}".format(exc.message)) + + def delete_cdnprofile(self): + ''' + Deletes the specified Azure CDN profile in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the CDN profile {0}".format(self.name)) + try: + poller = self.cdn_client.profiles.begin_delete( + self.resource_group, self.name) + self.get_poller_result(poller) + return True + except Exception as e: + self.log('Error attempting to delete the CDN profile.') + self.fail("Error deleting the CDN profile: {0}".format(e.message)) + return False + + def get_cdnprofile(self): + ''' + Gets the properties of the specified CDN profile. 
+ + :return: deserialized CDN profile state dictionary + ''' + self.log( + "Checking if the CDN profile {0} is present".format(self.name)) + try: + response = self.cdn_client.profiles.get(self.resource_group, self.name) + self.log("Response : {0}".format(response)) + self.log("CDN profile : {0} found".format(response.name)) + return cdnprofile_to_dict(response) + except Exception: + self.log('Did not find the CDN profile.') + return False + + def get_cdn_client(self): + if not self.cdn_client: + self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2017-04-02') + return self.cdn_client + + +def main(): + """Main execution""" + AzureRMCdnprofile() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile_info.py new file mode 100644 index 000000000..0a0e7c9c9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cdnprofile_info.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, , Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_cdnprofile_info + +version_added: "0.1.2" + +short_description: Get Azure CDN profile facts + +description: + - Get facts for a specific Azure CDN profile or all CDN profiles. + +options: + name: + description: + - Limit results to a specific CDN profile. + resource_group: + description: + - The resource group to search for the desired CDN profile. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Hai Cao (@caohai) + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get facts for one CDN profile + azure_rm_cdnprofile_info: + name: Testing + resource_group: myResourceGroup + + - name: Get facts for all CDN profiles + azure_rm_cdnprofile_info: + + - name: Get facts by tags + azure_rm_cdnprofile_info: + tags: + - Environment:Test +''' + +RETURN = ''' +cdnprofiles: + description: List of CDN profiles. + returned: always + type: complex + contains: + resource_group: + description: + - Name of a resource group where the CDN profile exists. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Name of the CDN profile. + returned: always + type: str + sample: Testing + location: + description: + - Location of the CDN profile. + type: str + sample: WestUS + id: + description: + - ID of the CDN profile. + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myCDN + provisioning_state: + description: + - Provisioning status of the profile. + type: str + sample: Succeeded + resource_state: + description: + - Resource status of the profile. + type: str + sample: Active + sku: + description: + - The pricing tier, defines a CDN provider, feature list and rate of the CDN profile. + type: str + sample: standard_verizon + type: + description: + - The type of the CDN profile. + type: str + sample: Microsoft.Cdn/profiles + tags: + description: + - The tags of the CDN profile. 
+ type: list + sample: [ + {"foo": "bar"} + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.cdn import CdnManagementClient +except Exception: + # handled in azure_rm_common + pass + +import re + +AZURE_OBJECT_CLASS = 'profiles' + + +class AzureRMCdnprofileInfo(AzureRMModuleBase): + """Utility class to get Azure CDN profile facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + cdnprofiles=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + self.cdn_client = None + + super(AzureRMCdnprofileInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_cdnprofile_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_cdnprofile_facts' module has been renamed to 'azure_rm_cdnprofile_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + self.cdn_client = self.get_cdn_client() + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['cdnprofiles'] = self.get_item() + elif self.resource_group: + self.results['cdnprofiles'] = self.list_resource_group() + else: + self.results['cdnprofiles'] = self.list_all() + + return self.results + + def get_item(self): + """Get a single Azure CDN profile""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.cdn_client.profiles.get( + self.resource_group, self.name) + except Exception: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_cdnprofile(item)] + + return 
result + + def list_resource_group(self): + """Get all Azure CDN profiles within a resource group""" + + self.log('List all Azure CDNs within a resource group') + + try: + response = self.cdn_client.profiles.list_by_resource_group( + self.resource_group) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_cdnprofile(item)) + + return results + + def list_all(self): + """Get all Azure CDN profiles within a subscription""" + self.log('List all CDN profiles within a subscription') + try: + response = self.cdn_client.profiles.list() + except Exception as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_cdnprofile(item)) + return results + + def serialize_cdnprofile(self, cdnprofile): + ''' + Convert a CDN profile object to dict. 
+ :param cdn: CDN profile object + :return: dict + ''' + result = self.serialize_obj(cdnprofile, AZURE_OBJECT_CLASS) + + new_result = {} + new_result['id'] = cdnprofile.id + new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id'])) + new_result['name'] = cdnprofile.name + new_result['type'] = cdnprofile.type + new_result['location'] = cdnprofile.location + new_result['resource_state'] = cdnprofile.resource_state + new_result['sku'] = cdnprofile.sku.name + new_result['provisioning_state'] = cdnprofile.provisioning_state + new_result['tags'] = cdnprofile.tags + return new_result + + def get_cdn_client(self): + if not self.cdn_client: + self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2017-04-02') + return self.cdn_client + + +def main(): + """Main module execution code path""" + + AzureRMCdnprofileInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py new file mode 100644 index 000000000..037cee7f1 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py @@ -0,0 +1,487 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_cognitivesearch +version_added: "1.4.0" +short_description: Manage Azure Cognitive Search service +description: + - Create, update or delete Azure Cognitive Search service. +options: + name: + description: + - The name of the Azure Cognitive Search service. 
+            - Search service names must only contain lowercase letters, digits or dashes.
+            - Cannot use dash as the first two or last one characters.
+            - Cannot contain consecutive dashes.
+            - Must be between 2 and 60 characters in length.
+            - Search service names must be globally unique.
+            - You cannot change the service name after the service is created.
+        type: str
+        required: true
+    resource_group:
+        description:
+            - The name of the resource group within the current subscription.
+        type: str
+        required: true
+    location:
+        description:
+            - Valid azure location. Defaults to location of the resource group.
+        type: str
+    hosting_mode:
+        description:
+            - Applicable only for the standard3 SKU.
+            - You can set this property to enable up to 3 high density partitions that allow up to 1000 indexes.
+            - For the standard3 SKU, the value is either 'default' or 'highDensity'.
+            - For all other SKUs, this value must be 'default'.
+        choices:
+            - default
+            - highDensity
+        type: str
+        default: 'default'
+    identity:
+        description:
+            - The identity for the resource.
+        choices:
+            - None
+            - SystemAssigned
+        type: str
+        default: 'None'
+    network_rule_set:
+        description:
+            - Network specific rules that determine how the Azure Cognitive Search service may be reached.
+        type: list
+        elements: str
+    partition_count:
+        description:
+            - The number of partitions in the search service.
+            - It can be C(1), C(2), C(3), C(4), C(6), or C(12).
+            - Values greater than 1 are only valid for standard SKUs.
+            - For 'standard3' services with hostingMode set to 'highDensity', the allowed values are between 1 and 3.
+        type: int
+        default: 1
+    public_network_access:
+        description:
+            - This value can be set to C(enabled) to avoid breaking changes on existing customer resources and templates.
+            - If set to C(disabled), traffic over public interface is not allowed, and private endpoint connections would be the exclusive access method.
+ choices: + - enabled + - disabled + type: str + default: 'enabled' + replica_count: + description: + - The number of replicas in the search service. + - It must be a value between 1 and 12 inclusive for I(sku=standard). + - It must be a value between 1 and 3 inclusive for I(sku=basic). + type: int + default: 1 + sku: + description: + - The SKU of the Search Service, which determines price tier and capacity limits. + - This property is required when creating a new Search Service. + choices: + - free + - basic + - standard + - standard2 + - standard3 + - storage_optimized_l1 + - storage_optimized_l2 + type: str + default: 'basic' + state: + description: + - Assert the state of the search instance. Set to C(present) to create or update a search instance. Set to C(absent) to remove a search instance. + type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - David Duque Hernández (@next-davidduquehernandez) +''' + +EXAMPLES = ''' + - name: Create Azure Cognitive Search + azure_rm_cognitivesearch: + resource_group: myResourceGroup + name: myAzureSearch +''' + +RETURN = ''' +state: + description: + - Info for Azure Cognitive Search. + returned: always + type: dict + contains: + hosting_mode: + description: + - Type of hosting mode selected. + returned: always + type: str + sample: default + id: + description: + - The unique identifier associated with this Azure Cognitive Search. + returned: always + type: str + sample: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + identity: + description: + - The identity of the Azure Cognitive Search Service. + returned: always + type: dict + contains: + principal_id: + description: + - Identifier assigned. + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + type: + description: + - Identity type. 
+ returned: always + type: str + sample: SystemAssigned + sample: + principal_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + type: SystemAssigned + location: + description: + - The geo-location where the Azure Cognitive Search Service lives. + returned: always + type: str + sample: West Europe + name: + description: + - The name of the Azure Cognitive Search Service. + returned: always + type: str + sample: myazuresearch + network_rule_set: + description: + - Network specific rules that determine how the Azure Cognitive Search service may be reached. + returned: always + type: list + sample: ['1.1.1.1', '8.8.8.8/31'] + partition_count: + description: + - The number of partitions in the Azure Cognitive Search Service. + returned: always + type: int + sample: 3 + provisioning_state: + description: + - The state of the provisioning state of Azure Cognitive Search Service. + returned: always + type: str + sample: succeeded + public_network_access: + description: + - If it's allowed traffic over public interface. + returned: always + type: str + sample: enabled + replica_count: + description: + - The number of replicas in the Azure Cognitive Search Service. + returned: always + type: int + sample: 3 + sku: + description: + - The SKU of the Azure Cognitive Search Service. + returned: always + type: str + sample: standard + status: + description: + - The state of the Azure Cognitive Search. + returned: always + type: str + sample: Active running + tags: + description: + - The resource tags. 
+ returned: always + type: dict + sample: { "tag1":"abc" } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSearch(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + hosting_mode=dict(type='str', default='default', choices=['default', 'highDensity']), + identity=dict(type='str', default='None', choices=['None', 'SystemAssigned']), + location=dict(type='str'), + name=dict(type='str', required=True), + network_rule_set=dict(type='list', elements='str'), + partition_count=dict(type='int', default=1), + public_network_access=dict(type='str', default='enabled', choices=['enabled', 'disabled']), + replica_count=dict(type='int', default=1), + resource_group=dict(type='str', required=True), + sku=dict(type='str', default='basic', choices=['free', 'basic', 'standard', 'standard2', 'standard3', + 'storage_optimized_l1', 'storage_optimized_l2']), + state=dict(type='str', default='present', choices=['present', 'absent']), + tags=dict(type='dict') + ) + + self.hosting_mode = None + self.identity = None + self.location = None + self.name = None + self.network_rule_set = list() + self.partition_count = None + self.public_network_access = None + self.replica_count = None + self.resource_group = None + self.sku = None + self.tags = None + + self.results = dict(changed=False) + self.account_dict = None + self.firewall_list = list() + + super(AzureRMSearch, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + self.account_dict = 
self.get_search() + + if self.account_dict is not None: + self.results['state'] = self.account_dict + else: + self.results['state'] = dict() + + if self.state == 'present': + if not self.account_dict: + self.results['state'] = self.create_search() + else: + self.results['state'] = self.update_search() + else: + self.delete_search() + self.results['state'] = dict(state='Deleted') + + return self.results + + def get_search(self): + self.log('Get properties for azure search {0}'.format(self.name)) + search_obj = None + account_dict = None + + try: + search_obj = self.search_client.services.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if search_obj: + account_dict = self.account_obj_to_dict(search_obj) + + return account_dict + + def check_values(self, hosting_mode, sku, partition_count, replica_count): + if ( + hosting_mode == 'highDensity' and + sku != 'standard3' + ): + self.fail("Hosting mode could not be 'highDensity' if sku is not 'standard3'.") + + if ( + sku == 'standard3' and + hosting_mode == 'highDensity' + and partition_count not in [1, 2, 3] + ): + self.fail("Partition count must be 1, 2 or 3 if hosting mode is 'highDensity' and sku 'standard3'.") + + if partition_count not in [1, 2, 3, 4, 6, 12]: + self.fail("Partition count must be 1, 2, 3, 4, 6 or 12.") + + if sku == 'basic': + if replica_count not in [1, 2, 3]: + self.fail("Replica count must be between 1 and 3.") + else: + if replica_count < 1 or replica_count > 12: + self.fail("Replica count must be between 1 and 12.") + + def create_search(self): + self.log("Creating search {0}".format(self.name)) + + self.check_values(self.hosting_mode, self.sku, self.partition_count, self.replica_count) + + self.check_name_availability() + self.results['changed'] = True + + if self.network_rule_set: + for rule in self.network_rule_set: + self.firewall_list.append(self.search_client.services.models.IpRule(value=rule)) + + search_model = 
self.search_client.services.models.SearchService( + hosting_mode=self.hosting_mode, + identity=self.search_client.services.models.Identity(type=self.identity), + location=self.location, + network_rule_set=dict(ip_rules=self.firewall_list) if len(self.firewall_list) > 0 else None, + partition_count=self.partition_count, + public_network_access=self.public_network_access, + replica_count=self.replica_count, + sku=self.search_client.services.models.Sku(name=self.sku), + tags=self.tags + ) + + try: + poller = self.search_client.services.begin_create_or_update(self.resource_group, self.name, search_model) + self.get_poller_result(poller) + except Exception as e: + self.log('Error creating Azure Search.') + self.fail("Failed to create Azure Search: {0}".format(str(e))) + + return self.get_search() + + def update_search(self): + self.log("Updating search {0}".format(self.name)) + + self.check_values( + self.hosting_mode or self.account_dict.get('hosting_mode'), + self.sku or self.account_dict.get('sku'), + self.partition_count or self.account_dict.get('partition_count'), + self.replica_count or self.account_dict.get('replica_count') + ) + + search_update_model = self.search_client.services.models.SearchServiceUpdate( + location=self.location, + hosting_mode=None, + partition_count=None, + public_network_access=None, + replica_count=None, + sku=self.search_client.services.models.Sku(name=self.account_dict.get('sku')) + ) + + if self.hosting_mode and self.account_dict.get('hosting_mode') != self.hosting_mode: + self.fail("Updating hosting_mode of an existing search service is not allowed.") + + if self.identity and self.account_dict.get('identity').get('type') != self.identity: + self.results['changed'] = True + search_update_model.identity = self.search_client.services.models.Identity(type=self.identity) + + if self.network_rule_set: + for rule in self.network_rule_set: + if len(self.network_rule_set) != len(self.account_dict.get('network_rule_set')) or rule not in 
self.account_dict.get('network_rule_set'): + self.results['changed'] = True + self.firewall_list.append(self.search_client.services.models.IpRule(value=rule)) + search_update_model.network_rule_set = dict(ip_rules=self.firewall_list) + + if self.partition_count and self.account_dict.get('partition_count') != self.partition_count: + self.results['changed'] = True + search_update_model.partition_count = self.partition_count + + if self.public_network_access and self.account_dict.get('public_network_access').lower() != self.public_network_access.lower(): + self.results['changed'] = True + search_update_model.public_network_access = self.public_network_access + + if self.replica_count and self.account_dict.get('replica_count') != self.replica_count: + self.results['changed'] = True + search_update_model.replica_count = self.replica_count + + if self.sku and self.account_dict.get('sku') != self.sku: + self.fail("Updating sku of an existing search service is not allowed.") + + if self.tags and self.account_dict.get('tags') != self.tags: + self.results['changed'] = True + search_update_model.tags = self.tags + + self.log('Updating search {0}'.format(self.name)) + + try: + if self.results['changed']: + poller = self.search_client.services.begin_create_or_update(self.resource_group, self.name, search_update_model) + self.get_poller_result(poller) + except Exception as e: + self.fail("Failed to update the search: {0}".format(str(e))) + + return self.get_search() + + def delete_search(self): + self.log('Delete search {0}'.format(self.name)) + + try: + if self.account_dict is not None: + self.results['changed'] = True + self.search_client.services.delete(self.resource_group, self.name) + except Exception as e: + self.fail("Failed to delete the search: {0}".format(str(e))) + + def check_name_availability(self): + self.log('Checking name availability for {0}'.format(self.name)) + try: + response = self.search_client.services.check_name_availability(self.name) + except Exception 
as e: + self.log('Error attempting to validate name.') + self.fail("Error checking name availability: {0}".format(str(e))) + if not response.is_name_available: + self.log('Error name not available.') + self.fail("{0} - {1}".format(response.message, response.reason)) + + def account_obj_to_dict(self, search_obj): + account_dict = dict( + hosting_mode=search_obj.hosting_mode, + id=search_obj.id, + identity=dict(type=search_obj.identity.type if search_obj.identity else 'None'), + location=search_obj.location, + name=search_obj.name, + network_rule_set=list(), + partition_count=search_obj.partition_count, + provisioning_state=search_obj.provisioning_state, + public_network_access=search_obj.public_network_access, + replica_count=search_obj.replica_count, + sku=search_obj.sku.name, + status=search_obj.status, + tags=search_obj.tags + ) + + if search_obj.identity and search_obj.identity.principal_id: + account_dict['identity']['principal_id'] = search_obj.identity.principal_id + + for rule in search_obj.network_rule_set.ip_rules: + account_dict['network_rule_set'].append(rule.value) + + return account_dict + + +def main(): + AzureRMSearch() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch_info.py new file mode 100644 index 000000000..4ae5cf819 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch_info.py @@ -0,0 +1,316 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: azure_rm_cognitivesearch_info +version_added: "1.4.0" +short_description: Get Azure Cognitive Search service info 
+description: + - Get info for a specific Azure Cognitive Search service or all Azure Cognitive Search service within a resource group. + +options: + resource_group: + description: + - The name of the Azure resource group. + type: str + name: + description: + - The name of the Azure Cognitive Search service. + type: str + show_keys: + description: + - Retrieve admin and query keys. + type: bool + default: False + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - David Duque Hernández (@next-davidduquehernandez) +''' + +EXAMPLES = ''' + - name: Get Azure Cognitive Search info from resource group 'myResourceGroup' and name 'myAzureSearch' + azure_rm_cognitivesearch_info: + resource_group: myResourceGroup + name: myAzureSearch + + - name: Get Azure Cognitive Search info from resource group 'myResourceGroup' + azure_rm_cognitivesearch_info: + resource_group: myResourceGroup + + - name: Get all Azure Cognitive Search info + azure_rm_cognitivesearch_info: +''' + +RETURN = ''' +search: + description: + - Info for Azure Cognitive Search. + returned: always + type: list + contains: + hosting_mode: + description: + - Type of hosting mode selected. + returned: always + type: str + sample: default + id: + description: + - The unique identifier associated with this Azure Cognitive Search. + returned: always + type: str + sample: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + identity: + description: + - The identity of the Azure Cognitive Search Service. + returned: always + type: dict + contains: + principal_id: + description: + - Identifier assigned. + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + type: + description: + - Identity type. 
+ returned: always + type: str + sample: SystemAssigned + sample: + principal_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + type: SystemAssigned + keys: + description: + - Admin and query keys for Azure Cognitive Search Service. + type: dict + contains: + admin_primary: + description: + - Primary admin key for Azure Cognitive Search Service. + type: str + sample: 12345ABCDE67890FGHIJ123ABC456DEF + admin_secondary: + description: + - Secondary admin key for Azure Cognitive Search Service. + type: str + sample: 12345ABCDE67890FGHIJ123ABC456DEF + query: + description: + - List of query keys for Azure Cognitive Search Service. + type: list + sample: [{'key': '12345ABCDE67890FGHIJ123ABC456DEF', 'name': 'Query key'}] + location: + description: + - The geo-location where the Azure Cognitive Search Service lives. + returned: always + type: str + sample: West Europe + name: + description: + - The name of the Azure Cognitive Search Service. + returned: always + type: str + sample: myazuresearch + network_rule_set: + description: + - Network specific rules that determine how the Azure Cognitive Search service may be reached. + returned: always + type: list + sample: ['1.1.1.1', '8.8.8.8/31'] + partition_count: + description: + - The number of partitions in the Azure Cognitive Search Service. + returned: always + type: int + sample: 3 + provisioning_state: + description: + - The state of the provisioning state of Azure Cognitive Search Service. + returned: always + type: str + sample: succeeded + public_network_access: + description: + - If it's allowed traffic over public interface. + returned: always + type: str + sample: enabled + replica_count: + description: + - The number of replicas in the Azure Cognitive Search Service. + returned: always + type: int + sample: 3 + sku: + description: + - The SKU of the Azure Cognitive Search Service. + returned: always + type: str + sample: standard + status: + description: + - The state of the Azure Cognitive Search. 
+ returned: always + type: str + sample: Active running + tags: + description: + - The resource tags. + returned: always + type: dict + sample: { "tag1":"abc" } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSearchInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + show_keys=dict(type='bool', default=False) + ) + + self.results = dict( + changed=False, + search=[] + ) + + self.name = None + self.resource_group = None + self.show_keys = False + + super(AzureRMSearchInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + results = [] + if self.name: + results = self.get_search() + elif self.resource_group: + results = self.list_resource_group() + else: + results = self.list_all() + + self.results['search'] = results + return self.results + + def get_search(self): + self.log('Get properties for azure search {0}'.format(self.name)) + search_obj = None + account_dict = None + + try: + search_obj = self.search_client.services.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if search_obj: + account_dict = self.account_obj_to_dict(search_obj) + + return account_dict + + def list_resource_group(self): + self.log('Get basic properties for azure search in resource group {0}'.format(self.resource_group)) + search_obj = None + results = list() + + try: + search_obj = self.search_client.services.list_by_resource_group(self.resource_group) + except Exception: + 
pass + + if search_obj: + for seach_item in search_obj: + results.append(self.account_obj_to_dict(seach_item)) + return results + + return list() + + def list_all(self): + self.log('Get basic properties for all azure search') + search_obj = None + results = list() + + try: + search_obj = self.search_client.services.list_by_subscription() + except Exception: + pass + + if search_obj: + for search_item in search_obj: + results.append(self.account_obj_to_dict(search_item)) + return results + + return list() + + def account_obj_to_dict(self, search_obj): + account_dict = dict( + hosting_mode=search_obj.hosting_mode, + id=search_obj.id, + identity=dict(type=search_obj.identity.type if search_obj.identity else 'None'), + location=search_obj.location, + name=search_obj.name, + network_rule_set=list(), + partition_count=search_obj.partition_count, + provisioning_state=search_obj.provisioning_state, + public_network_access=search_obj.public_network_access, + replica_count=search_obj.replica_count, + sku=search_obj.sku.name, + status=search_obj.status, + tags=search_obj.tags + ) + + if search_obj.identity and search_obj.identity.principal_id: + account_dict['identity']['principal_id'] = search_obj.identity.principal_id + + for rule in search_obj.network_rule_set.ip_rules: + account_dict['network_rule_set'].append(rule.value) + + if self.show_keys: + account_dict['keys'] = dict() + + admin_keys = self.search_client.admin_keys.get(self.resource_group, self.name) + account_dict['keys']['admin_primary'] = admin_keys.primary_key + account_dict['keys']['admin_secondary'] = admin_keys.secondary_key + + query_keys = self.search_client.query_keys.list_by_search_service(self.resource_group, self.name) + account_dict['keys']['query'] = list() + for key in query_keys: + account_dict['keys']['query'].append(dict(name=key.name, key=key.key)) + + return account_dict + + +def main(): + AzureRMSearchInfo() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance.py new file mode 100644 index 000000000..2b0bffbc5 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance.py @@ -0,0 +1,839 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerinstance +version_added: "0.1.2" +short_description: Manage an Azure Container Instance +description: + - Create, update and delete an Azure Container Instance. + +options: + resource_group: + description: + - Name of resource group. + type: str + required: true + name: + description: + - The name of the container group. + required: true + type: str + os_type: + description: + - The OS type of containers. + type: str + choices: + - linux + - windows + default: linux + state: + description: + - Assert the state of the container instance. Use C(present) to create or update an container instance and C(absent) to delete it. + type: str + default: present + choices: + - absent + - present + ip_address: + description: + - The IP address type of the container group. + - Default is C(none) and creating an instance without public IP. + type: str + choices: + - public + - none + - private + default: 'none' + dns_name_label: + description: + - The Dns name label for the IP. + type: str + ports: + description: + - List of ports exposed within the container group. + - This option is deprecated, using I(ports) under I(containers)". + type: list + elements: int + location: + description: + - Valid azure location. Defaults to location of the resource group. 
+ type: str + registry_login_server: + description: + - The container image registry login server. + type: str + registry_username: + description: + - The username to log in container image registry server. + type: str + registry_password: + description: + - The password to log in container image registry server. + type: str + containers: + description: + - List of containers. + - Required when creation. + type: list + elements: dict + suboptions: + name: + description: + - The name of the container instance. + type: str + required: true + image: + description: + - The container image name. + type: str + required: true + memory: + description: + - The required memory of the containers in GB. + type: float + default: 1.5 + cpu: + description: + - The required number of CPU cores of the containers. + type: float + default: 1 + ports: + description: + - List of ports exposed within the container group. + type: list + elements: int + environment_variables: + description: + - List of container environment variables. + - When updating existing container all existing variables will be replaced by new ones. + type: list + elements: dict + suboptions: + name: + description: + - Environment variable name. + type: str + required: true + value: + description: + - Environment variable value. + type: str + required: true + is_secure: + description: + - Is variable secure. + type: bool + volume_mounts: + description: + - The volume mounts for the container instance + type: list + elements: dict + suboptions: + name: + description: + - The name of the volume mount + required: true + type: str + mount_path: + description: + - The path within the container where the volume should be mounted + required: true + type: str + read_only: + description: + - The flag indicating whether the volume mount is read-only + type: bool + commands: + description: + - List of commands to execute within the container instance in exec form. 
+ - When updating existing container all existing commands will be replaced by new ones. + type: list + elements: str + restart_policy: + description: + - Restart policy for all containers within the container group. + type: str + choices: + - always + - on_failure + - never + subnet_ids: + description: + - The subnet resource IDs for a container group. + - Multiple subnets are not yet supported. Only 1 subnet can be used. + type: list + elements: str + volumes: + description: + - List of Volumes that can be mounted by containers in this container group. + type: list + elements: dict + suboptions: + name: + description: + - The name of the Volume + required: true + type: str + azure_file: + description: + - The Azure File volume + type: dict + suboptions: + share_name: + description: + - The name of the Azure File share to be mounted as a volume + required: true + type: str + read_only: + description: + - The flag indicating whether the Azure File shared mounted as a volume is read-only + type: bool + storage_account_name: + description: + - The name of the storage account that contains the Azure File share + required: true + type: str + storage_account_key: + description: + - The storage account access key used to access the Azure File share + required: true + type: str + empty_dir: + description: + - The empty directory volume + type: dict + secret: + description: + - The secret volume + type: dict + git_repo: + description: + - The git repo volume + type: dict + suboptions: + directory: + description: + - Target directory name + type: str + repository: + description: + - Repository URL + required: true + type: str + revision: + description: + - Commit hash for the specified revision + type: str + force_update: + description: + - Force update of existing container instance. Any update will result in deletion and recreation of existing containers. 
+ type: bool + default: 'no' + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create sample container group + azure_rm_containerinstance: + resource_group: myResourceGroup + name: myContainerInstanceGroup + os_type: linux + ip_address: public + containers: + - name: myContainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + + - name: Create sample container group with azure file share volume + azure_rm_containerinstance: + resource_group: myResourceGroup + name: myContainerInstanceGroupz + os_type: linux + ip_address: public + containers: + - name: mycontainer1 + image: httpd + memory: 1 + volume_mounts: + - name: filesharevolume + mount_path: "/data/files" + ports: + - 80 + - 81 + volumes: + - name: filesharevolume + azure_file: + storage_account_name: mystorageaccount + share_name: acishare + storage_account_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + + - name: Create sample container group with git repo volume + azure_rm_containerinstance: + resource_group: myResourceGroup + name: myContainerInstanceGroup + os_type: linux + ip_address: public + containers: + - name: mycontainer1 + image: httpd + memory: 1 + volume_mounts: + - name: myvolume1 + mount_path: "/mnt/test" + ports: + - 80 + - 81 + volumes: + - name: myvolume1 + git_repo: + repository: "https://github.com/Azure-Samples/aci-helloworld.git" + + - name: Create sample container instance with subnet + azure_rm_containerinstance: + resource_group: myResourceGroup + name: myContainerInstanceGroup + os_type: linux + ip_address: private + location: eastus + subnet_ids: + - "{{ subnet_id }}" + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 +''' +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/containerGroups/aci1b6dd89 +provisioning_state: + description: + - Provisioning state of the container. + returned: always + type: str + sample: Creating +ip_address: + description: + - Public IP Address of created container group. + returned: if address is public + type: str + sample: 175.12.233.11 +containers: + description: + - The containers within the container group. + returned: always + type: list + elements: dict + sample: [ + { + "commands": null, + "cpu": 1.0, + "environment_variables": null, + "image": "httpd", + "memory": 1.0, + "name": "mycontainer1", + "ports": [ + 80, + 81 + ], + "volume_mounts": [ + { + "mount_path": "/data/files", + "name": "filesharevolume", + "read_only": false + } + ] + } + ] +volumes: + description: + - The list of volumes that mounted by containers in container group + returned: if volumes specified + type: list + elements: dict + contains: + name: + description: + - The name of the Volume + returned: always + type: str + sample: filesharevolume + azure_file: + description: + - Azure file share volume details + returned: If Azure file share type of volume requested + type: dict + sample: { + "read_only": null, + "share_name": "acishare", + "storage_account_key": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "storage_account_name": "mystorageaccount" + } + empty_dir: + description: + - Empty directory volume details + returned: If Empty directory type of volume requested + type: dict + sample: {} + secret: + description: + - Secret volume details + returned: If Secret type of volume requested + type: dict + sample: {} + git_repo: + description: + - Git Repo volume details + returned: If Git repo type of volume requested + type: dict + sample: { + "directory": null, + "repository": "https://github.com/Azure-Samples/aci-helloworld.git", + "revision": null + } +''' + +from 
ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +def create_container_dict_from_obj(container): + ''' + Create a dict from an instance of a Container. + + :param rule: Container + :return: dict + ''' + results = dict( + name=container.name, + image=container.image, + memory=container.resources.requests.memory_in_gb, + cpu=container.resources.requests.cpu + # command (list of str) + # ports (list of ContainerPort) + # environment_variables (list of EnvironmentVariable) + # resources (ResourceRequirements) + # volume mounts (list of VolumeMount) + ) + + if container.instance_view is not None: + # instance_view (ContainerPropertiesInstanceView) + results["instance_restart_count"] = container.instance_view.restart_count + if container.instance_view.current_state: + results["instance_current_state"] = container.instance_view.current_state.state + results["instance_current_start_time"] = container.instance_view.current_state.start_time + results["instance_current_exit_code"] = container.instance_view.current_state.exit_code + results["instance_current_finish_time"] = container.instance_view.current_state.finish_time + results["instance_current_detail_status"] = container.instance_view.current_state.detail_status + if container.instance_view.previous_state: + results["instance_previous_state"] = container.instance_view.previous_state.state + results["instance_previous_start_time"] = container.instance_view.previous_state.start_time + results["instance_previous_exit_code"] = container.instance_view.previous_state.exit_code + results["instance_previous_finish_time"] = container.instance_view.previous_state.finish_time + results["instance_previous_detail_status"] = 
container.instance_view.previous_state.detail_status + # events (list of ContainerEvent) + return results + + +env_var_spec = dict( + name=dict(type='str', required=True), + value=dict(type='str', required=True), + is_secure=dict(type='bool') +) + + +volume_mount_var_spec = dict( + name=dict(type='str', required=True), + mount_path=dict(type='str', required=True), + read_only=dict(type='bool') +) + + +container_spec = dict( + name=dict(type='str', required=True), + image=dict(type='str', required=True), + memory=dict(type='float', default=1.5), + cpu=dict(type='float', default=1), + ports=dict(type='list', elements='int'), + commands=dict(type='list', elements='str'), + environment_variables=dict(type='list', elements='dict', options=env_var_spec), + volume_mounts=dict(type='list', elements='dict', options=volume_mount_var_spec) +) + + +git_repo_volume_spec = dict( + directory=dict(type='str'), + repository=dict(type='str', required=True), + revision=dict(type='str') +) + + +azure_file_volume_spec = dict( + share_name=dict(type='str', required=True), + read_only=dict(type='bool'), + storage_account_name=dict(type='str', required=True), + storage_account_key=dict(type='str', required=True, no_log=True) +) + + +volumes_spec = dict( + name=dict(type='str', required=True), + azure_file=dict(type='dict', options=azure_file_volume_spec), + empty_dir=dict(type='dict'), + secret=dict(type='dict', no_log=True), + git_repo=dict(type='dict', options=git_repo_volume_spec) +) + + +class AzureRMContainerInstance(AzureRMModuleBase): + """Configuration class for an Azure RM container instance resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + os_type=dict( + type='str', + default='linux', + choices=['linux', 'windows'] + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str', + ), + 
ip_address=dict( + type='str', + default='none', + choices=['public', 'none', 'private'] + ), + dns_name_label=dict( + type='str', + ), + ports=dict( + type='list', + elements='int', + default=[] + ), + registry_login_server=dict( + type='str', + default=None + ), + registry_username=dict( + type='str', + default=None + ), + registry_password=dict( + type='str', + default=None, + no_log=True + ), + containers=dict( + type='list', + elements='dict', + options=container_spec + ), + restart_policy=dict( + type='str', + choices=['always', 'on_failure', 'never'] + ), + force_update=dict( + type='bool', + default=False + ), + volumes=dict( + type='list', + elements='dict', + options=volumes_spec + ), + subnet_ids=dict( + type='list', + elements='str', + ), + ) + + self.resource_group = None + self.name = None + self.location = None + self.state = None + self.ip_address = None + self.dns_name_label = None + self.containers = None + self.restart_policy = None + self.subnet_ids = None + + self.tags = None + + self.results = dict(changed=False, state=dict()) + self.cgmodels = None + + required_if = [ + ('state', 'present', ['containers']), ('ip_address', 'private', ['subnet_ids']) + ] + + super(AzureRMContainerInstance, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = None + response = None + results = dict() + + # since this client hasn't been upgraded to expose models directly off the OperationClass, fish them out + self.cgmodels = self.containerinstance_client.container_groups.models + + resource_group = self.get_resource_group(self.resource_group) + + if not self.location: + self.location = resource_group.location + + response = self.get_containerinstance() + + if not response: + self.log("Container 
Group doesn't exist") + + if self.state == 'absent': + self.log("Nothing to delete") + else: + self.force_update = True + else: + self.log("Container instance already exists") + + if self.state == 'absent': + if not self.check_mode: + self.delete_containerinstance() + self.results['changed'] = True + self.log("Container instance deleted") + elif self.state == 'present': + self.log("Need to check if container group has to be deleted or may be updated") + update_tags, newtags = self.update_tags(response.get('tags', dict())) + + if self.force_update: + self.log('Deleting container instance before update') + if not self.check_mode: + self.delete_containerinstance() + elif update_tags: + if not self.check_mode: + self.tags = newtags + self.results['changed'] = True + response = self.update_containerinstance() + + if self.state == 'present': + + self.log("Need to Create / Update the container instance") + + if self.force_update: + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_containerinstance() + + self.results['id'] = response['id'] + self.results['provisioning_state'] = response['provisioning_state'] + self.results['ip_address'] = response['ip_address']['ip'] if 'ip_address' in response else '' + + self.log("Creation / Update done") + + return self.results + + def update_containerinstance(self): + ''' + Updates a container service with the specified configuration of orchestrator, masters, and agents. 
+ + :return: deserialized container instance state dictionary + ''' + try: + response = self.containerinstance_client.container_groups.update(resource_group_name=self.resource_group, + container_group_name=self.name, + resource=dict(tags=self.tags)) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error when Updating ACI {0}: {1}".format(self.name, exc.message or str(exc))) + + return response.as_dict() + + def create_update_containerinstance(self): + ''' + Creates or updates a container service with the specified configuration of orchestrator, masters, and agents. + + :return: deserialized container instance state dictionary + ''' + self.log("Creating / Updating the container instance {0}".format(self.name)) + + registry_credentials = None + + if self.registry_login_server is not None: + registry_credentials = [self.cgmodels.ImageRegistryCredential(server=self.registry_login_server, + username=self.registry_username, + password=self.registry_password)] + + ip_address = None + + containers = [] + all_ports = set([]) + for container_def in self.containers: + name = container_def.get("name") + image = container_def.get("image") + memory = container_def.get("memory") + cpu = container_def.get("cpu") + commands = container_def.get("commands") + ports = [] + variables = [] + volume_mounts = [] + + port_list = container_def.get("ports") + if port_list: + for port in port_list: + all_ports.add(port) + ports.append(self.cgmodels.ContainerPort(port=port)) + + variable_list = container_def.get("environment_variables") + if variable_list: + for variable in variable_list: + variables.append(self.cgmodels.EnvironmentVariable(name=variable.get('name'), + value=variable.get('value') if not variable.get('is_secure') else None, + secure_value=variable.get('value') if variable.get('is_secure') else None)) + + volume_mounts_list = container_def.get("volume_mounts") + if volume_mounts_list: + for volume_mount 
in volume_mounts_list: + volume_mounts.append(self.cgmodels.VolumeMount(name=volume_mount.get('name'), + mount_path=volume_mount.get('mount_path'), + read_only=volume_mount.get('read_only'))) + + containers.append(self.cgmodels.Container(name=name, + image=image, + resources=self.cgmodels.ResourceRequirements( + requests=self.cgmodels.ResourceRequests(memory_in_gb=memory, cpu=cpu) + ), + ports=ports, + command=commands, + environment_variables=variables, + volume_mounts=volume_mounts)) + + if self.ip_address is not None: + # get list of ports + if len(all_ports) > 0: + ports = [] + for port in all_ports: + ports.append(self.cgmodels.Port(port=port, protocol="TCP")) + ip_address = self.cgmodels.IpAddress(ports=ports, dns_name_label=self.dns_name_label, type=self.ip_address) + + subnet_ids = None + if self.subnet_ids is not None: + subnet_ids = [self.cgmodels.ContainerGroupSubnetId(id=item) for item in self.subnet_ids] + + parameters = self.cgmodels.ContainerGroup(location=self.location, + containers=containers, + image_registry_credentials=registry_credentials, + restart_policy=_snake_to_camel(self.restart_policy, True) if self.restart_policy else None, + ip_address=ip_address, + os_type=self.os_type, + subnet_ids=subnet_ids, + volumes=self.volumes, + tags=self.tags) + + try: + response = self.containerinstance_client.container_groups.begin_create_or_update(resource_group_name=self.resource_group, + container_group_name=self.name, + container_group=parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error when creating ACI {0}: {1}".format(self.name, exc.message or str(exc))) + + return response.as_dict() + + def delete_containerinstance(self): + ''' + Deletes the specified container group instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the container instance {0}".format(self.name)) + try: + response = self.containerinstance_client.container_groups.begin_delete(resource_group_name=self.resource_group, container_group_name=self.name) + return True + except Exception as exc: + self.fail('Error when deleting ACI {0}: {1}'.format(self.name, exc.message or str(exc))) + return False + + def get_containerinstance(self): + ''' + Gets the properties of the specified container service. + + :return: deserialized container instance state dictionary + ''' + self.log("Checking if the container instance {0} is present".format(self.name)) + found = False + try: + response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, container_group_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("Container instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the container instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMContainerInstance() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance_info.py new file mode 100644 index 000000000..3ad3b9722 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerinstance_info.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerinstance_info +version_added: "0.1.2" +short_description: Get Azure Container Instance facts +description: + - Get facts of 
Container Instance. + +options: + resource_group: + description: + - The name of the resource group. + type: str + required: True + name: + description: + - The name of the container instance. + type: str + tags: + description: + - Limit results by providing of tags. Format tags 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get specific Container Instance facts + azure_rm_containerinstance_info: + resource_group: myResourceGroup + name: myContainer + + - name: List Container Instances in a specified resource group name + azure_rm_containerinstance_info: + resource_group: myResourceGroup + tags: + - key + - key:value +''' + +RETURN = ''' +container_groups: + description: A list of Container Instance dictionaries. + returned: always + type: complex + contains: + id: + description: + - The resource id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/contain + erGroups/myContainer" + resource_group: + description: + - Resource group where the container exists. + returned: always + type: str + sample: testrg + name: + description: + - The resource name. + returned: always + type: str + sample: mycontainers + location: + description: + - The resource location. + returned: always + type: str + sample: westus + os_type: + description: + - The OS type of containers. + returned: always + type: str + sample: linux + ip_address: + description: + - IP address of the container instance. + returned: always + type: str + sample: 173.15.18.1 + dns_name_label: + description: + - The Dns name label for the IP. + returned: always + type: str + sample: mydomain + ports: + description: + - List of ports exposed by the container instance. 
+ returned: always + type: list + sample: [ 80, 81 ] + containers: + description: + - The containers within the container group. + returned: always + type: complex + sample: containers + contains: + name: + description: + - The name of the container instance. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance + /containerGroups/myContainer" + image: + description: + - The container image name. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance + /containerGroups/myContainer" + memory: + description: + - The required memory of the containers in GB. + returned: always + type: float + sample: 1.5 + cpu: + description: + - The required number of CPU cores of the containers. + returned: always + type: int + sample: 1 + ports: + description: + - List of ports exposed within the container group. + returned: always + type: list + sample: [ 80, 81 ] + commands: + description: + - List of commands to execute within the container instance in exec form. + returned: always + type: list + sample: [ "pip install abc" ] + volume_mounts: + description: + - The list of volumes mounted in container instance + returned: If volumes mounted in container instance + type: list + sample: [ + { + "mount_path": "/mnt/repo", + "name": "myvolume1" + } + ] + environment_variables: + description: + - List of container environment variables. + type: complex + contains: + name: + description: + - Environment variable name. + type: str + value: + description: + - Environment variable value. + type: str + subnet_ids: + description: + - The subnet resource IDs for a container group. 
+ type: list + returned: always + sample: [{'id': "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnetrpfx/subnets/subrpfx"}] + volumes: + description: The list of Volumes that can be mounted by container instances + returned: If container group has volumes + type: list + sample: [ + { + "git_repo": { + "repository": "https://github.com/Azure-Samples/aci-helloworld.git" + }, + "name": "myvolume1" + } + ] + tags: + description: Tags assigned to the resource. Dictionary of string:string pairs. + type: dict + sample: { "tag1": "abc" } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMContainerInstanceInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False, + ) + self.resource_group = None + self.name = None + self.tags = None + + super(AzureRMContainerInstanceInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_containerinstance_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_containerinstance_facts' module has been renamed to 'azure_rm_containerinstance_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.name is not None): + self.results['containerinstances'] = self.get() + elif (self.resource_group is not None): + 
self.results['containerinstances'] = self.list_by_resource_group() + else: + self.results['containerinstances'] = self.list_all() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, + container_group_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Container Instances.') + + if response is not None and self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.containerinstance_client.container_groups.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail('Could not list facts for Container Instances.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def list_all(self): + response = None + results = [] + try: + response = self.containerinstance_client.container_groups.list() + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail('Could not list facts for Container Instances.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + containers = d['containers'] + ports = d['ip_address']['ports'] if 'ip_address' in d else [] + resource_group = d['id'].split('resourceGroups/')[1].split('/')[0] + + for port_index in range(len(ports)): + ports[port_index] = ports[port_index]['port'] + + for container_index in range(len(containers)): + old_container = containers[container_index] + new_container = { + 'name': old_container['name'], + 
'image': old_container['image'], + 'memory': old_container['resources']['requests']['memory_in_gb'], + 'cpu': old_container['resources']['requests']['cpu'], + 'ports': [], + 'commands': old_container.get('command'), + 'environment_variables': old_container.get('environment_variables'), + 'volume_mounts': [] + } + for port_index in range(len(old_container['ports'])): + new_container['ports'].append(old_container['ports'][port_index]['port']) + if 'volume_mounts' in old_container: + for volume_mount_index in range(len(old_container['volume_mounts'])): + new_container['volume_mounts'].append(old_container['volume_mounts'][volume_mount_index]) + containers[container_index] = new_container + + d = { + 'id': d['id'], + 'resource_group': resource_group, + 'name': d['name'], + 'os_type': d['os_type'], + 'dns_name_label': d['ip_address'].get('dns_name_label'), + 'ip_address': d['ip_address']['ip'] if 'ip_address' in d else '', + 'ports': ports, + 'location': d['location'], + 'containers': containers, + 'restart_policy': _camel_to_snake(d.get('restart_policy')) if d.get('restart_policy') else None, + 'tags': d.get('tags', None), + 'subnet_ids': d.get('subnet_ids', None), + 'volumes': d['volumes'] if 'volumes' in d else [] + } + return d + + +def main(): + AzureRMContainerInstanceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry.py new file mode 100644 index 000000000..cf974f378 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Yawei Wang, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: 
azure_rm_containerregistry +version_added: "0.1.2" +short_description: Manage an Azure Container Registry +description: + - Create, update and delete an Azure Container Registry. + +options: + resource_group: + description: + - Name of a resource group where the Container Registry exists or will be created. + required: true + type: str + name: + description: + - Name of the Container Registry. + required: true + type: str + state: + description: + - Assert the state of the container registry. Use C(present) to create or update an container registry and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present + location: + description: + - Valid azure location. Defaults to location of the resource group. + type: str + admin_user_enabled: + description: + - If enabled, you can use the registry name as username and admin user access key as password to docker login to your container registry. + type: bool + default: no + sku: + description: + - Specifies the SKU to use. Currently can be either C(Basic), C(Standard) or C(Premium). + default: Standard + type: str + choices: + - Basic + - Standard + - Premium + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yawei Wang (@yaweiw) + +''' + +EXAMPLES = ''' + - name: Create an azure container registry + azure_rm_containerregistry: + name: myRegistry + location: eastus + resource_group: myResourceGroup + admin_user_enabled: true + sku: Premium + tags: + Release: beta1 + Environment: Production + + - name: Remove an azure container registry + azure_rm_containerregistry: + name: myRegistry + resource_group: myResourceGroup + state: absent +''' +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry +name: + description: + - Registry name. 
+ returned: always + type: str + sample: myregistry +location: + description: + - Resource location. + returned: always + type: str + sample: westus +admin_user_enabled: + description: + - Is admin user enabled. + returned: always + type: bool + sample: true +sku: + description: + - The SKU name of the container registry. + returned: always + type: str + sample: Standard +provisioning_state: + description: + - Provisioning state. + returned: always + type: str + sample: Succeeded +login_server: + description: + - Registry login server. + returned: always + type: str + sample: myregistry.azurecr.io +credentials: + description: + - Passwords defined for the registry. + returned: always + type: complex + contains: + password: + description: + - password value. + returned: when registry exists and C(admin_user_enabled) is set + type: str + sample: pass1value + password2: + description: + - password2 value. + returned: when registry exists and C(admin_user_enabled) is set + type: str + sample: pass2value +tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. 
+ returned: always + type: dict +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.containerregistry.models import ( + Registry, + RegistryUpdateParameters, + Sku, + RegistryNameCheckRequest, + ) +except ImportError as exc: + # This is handled in azure_rm_common + pass + + +def create_containerregistry_dict(registry, credentials): + ''' + Helper method to deserialize a ContainerRegistry to a dict + :param: registry: return container registry object from Azure rest API call + :param: credentials: return credential objects from Azure rest API call + :return: dict of return container registry and it's credentials + ''' + results = dict( + id=registry.id if registry is not None else "", + name=registry.name if registry is not None else "", + location=registry.location if registry is not None else "", + admin_user_enabled=registry.admin_user_enabled if registry is not None else "", + sku=registry.sku.name if registry is not None else "", + provisioning_state=registry.provisioning_state if registry is not None else "", + login_server=registry.login_server if registry is not None else "", + credentials=dict(), + tags=registry.tags if registry is not None else "" + ) + if credentials: + results['credentials'] = dict( + password=credentials.passwords[0].value, + password2=credentials.passwords[1].value + ) + + return results + + +class Actions: + NoAction, Create, Update = range(3) + + +class AzureRMContainerRegistry(AzureRMModuleBase): + """Configuration class for an Azure RM container registry resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + admin_user_enabled=dict( + type='bool', + 
default=False + ), + sku=dict( + type='str', + default='Standard', + choices=['Basic', 'Standard', 'Premium'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.state = None + self.sku = None + self.tags = None + + self.results = dict(changed=False, state=dict()) + + super(AzureRMContainerRegistry, self).__init__( + derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = None + response = None + to_do = Actions.NoAction + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + response = self.get_containerregistry() + + if self.state == 'present': + if not response: + to_do = Actions.Create + else: + self.log('Results : {0}'.format(response)) + self.results.update(response) + if response['provisioning_state'] == "Succeeded": + to_do = Actions.NoAction + if (self.location is not None) and self.location != response['location']: + to_do = Actions.Update + elif (self.sku is not None) and self.sku != response['sku']: + to_do = Actions.Update + else: + to_do = Actions.NoAction + + self.log("Create / Update the container registry instance") + if self.check_mode: + return self.results + + self.results.update(self.create_update_containerregistry(to_do)) + if to_do != Actions.NoAction: + self.results['changed'] = True + else: + self.results.update(response) + self.results['changed'] = False + + self.log("Container registry instance created or updated") + elif self.state == 'absent' and response: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_containerregistry() + self.log("Container registry instance deleted") + + return self.results + + def create_update_containerregistry(self, to_do): + ''' + Creates 
or updates a container registry. + + :return: deserialized container registry instance state dictionary + ''' + self.log("Creating / Updating the container registry instance {0}".format(self.name)) + + try: + if to_do != Actions.NoAction: + if to_do == Actions.Create: + name_status = self.containerregistry_client.registries.check_name_availability( + registry_name_check_request=RegistryNameCheckRequest( + name=self.name, + ) + ) + if name_status.name_available: + poller = self.containerregistry_client.registries.begin_create( + resource_group_name=self.resource_group, + registry_name=self.name, + registry=Registry( + location=self.location, + sku=Sku( + name=self.sku + ), + tags=self.tags, + admin_user_enabled=self.admin_user_enabled + ) + ) + else: + raise Exception("Invalid registry name. reason: " + name_status.reason + " message: " + name_status.message) + else: + registry = self.containerregistry_client.registries.get(resource_group_name=self.resource_group, registry_name=self.name) + if registry is not None: + poller = self.containerregistry_client.registries.begin_update( + resource_group_name=self.resource_group, + registry_name=self.name, + registry_update_parameters=RegistryUpdateParameters( + sku=Sku( + name=self.sku + ), + tags=self.tags, + admin_user_enabled=self.admin_user_enabled + ) + ) + else: + raise Exception("Update registry failed as registry '" + self.name + "' doesn't exist.") + response = self.get_poller_result(poller) + if self.admin_user_enabled: + credentials = self.containerregistry_client.registries.list_credentials(resource_group_name=self.resource_group, registry_name=self.name) + else: + self.log('Cannot perform credential operations as admin user is disabled') + credentials = None + else: + response = None + credentials = None + except (Exception) as exc: + self.log('Error attempting to create / update the container registry instance.') + self.fail("Error creating / updating the container registry instance: {0}".format(str(exc))) + 
    def delete_containerregistry(self):
        '''
        Deletes the specified container registry in the specified subscription and resource group.

        :return: True (failures abort the module via self.fail)
        '''
        self.log("Deleting the container registry instance {0}".format(self.name))
        try:
            # begin_delete returns an LRO poller; block until deletion completes.
            poller = self.containerregistry_client.registries.begin_delete(resource_group_name=self.resource_group, registry_name=self.name)
            response = self.get_poller_result(poller)
            self.log("Delete container registry response: {0}".format(response))
        except Exception as e:
            self.log('Error attempting to delete the container registry instance.')
            self.fail("Error deleting the container registry instance: {0}".format(str(e)))

        return True

    def get_containerregistry(self):
        '''
        Gets the properties of the specified container registry.

        :return: deserialized container registry state dictionary, or None when the registry does not exist
        '''
        self.log("Checking if the container registry instance {0} is present".format(self.name))
        found = False
        try:
            response = self.containerregistry_client.registries.get(resource_group_name=self.resource_group, registry_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Container registry instance : {0} found".format(response.name))
        except ResourceNotFoundError as e:
            # An absent registry is a normal outcome, not a module failure.
            self.log('Did not find the container registry instance: {0}'.format(str(e)))
            response = None
        if found is True and self.admin_user_enabled is True:
            try:
                credentials = self.containerregistry_client.registries.list_credentials(resource_group_name=self.resource_group, registry_name=self.name)
            except Exception as e:
                self.fail('List registry credentials failed: {0}'.format(str(e)))
                # NOTE(review): self.fail() raises, so this assignment is unreachable dead code.
                credentials = None
        elif found is True and self.admin_user_enabled is False:
            # Admin user disabled: credentials cannot be listed for this registry.
            credentials = None
        else:
            return None
        return create_containerregistry_dict(response, credentials)


def main():
    """Main execution"""
    AzureRMContainerRegistry()


if __name__ == '__main__':
    main()
'__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry_info.py new file mode 100644 index 000000000..8148674fa --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistry_info.py @@ -0,0 +1,277 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerregistry_info +version_added: "0.1.2" +short_description: Get Azure Container Registry facts +description: + - Get facts for Container Registry. + +options: + resource_group: + description: + - The name of the resource group to which the container registry belongs. + type: str + name: + description: + - The name of the container registry. + type: str + retrieve_credentials: + description: + - Retrieve credentials for container registry. + type: bool + default: no + tags: + description: + - List of tags to be matched. + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Registry + azure_rm_containerregistry_info: + resource_group: myResourceGroup + name: myRegistry + + - name: List instances of Registry + azure_rm_containerregistry_info: + resource_group: myResourceGroup + tags: + - key + - key:value +''' + +RETURN = ''' +registries: + description: + - A list of dictionaries containing facts for registries. + returned: always + type: complex + contains: + id: + description: + - The resource ID. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registr + ies/myRegistry" + name: + description: + - The name of the resource. + returned: always + type: str + sample: myRegistry + location: + description: + - The location of the resource. This cannot be changed after the resource is created. + returned: always + type: str + sample: westus + admin_user_enabled: + description: + - Is admin user enabled. + returned: always + type: bool + sample: yes + sku: + description: + - The SKU name of the container registry. + returned: always + type: str + sample: Premium + provisioning_state: + description: + - Provisioning state of the container registry. + returned: always + type: str + sample: Succeeded + login_server: + description: + - Login server for the registry. + returned: always + type: str + sample: acrd08521b.azurecr.io + credentials: + description: + - Credentials, fields will be empty if admin user is not enabled for ACR. + returned: when C(retrieve_credentials) is set and C(admin_user_enabled) is set on ACR + type: complex + contains: + username: + description: + - The user name for container registry. + returned: when registry exists and C(admin_user_enabled) is set + type: str + sample: zim + password: + description: + - password value. + returned: when registry exists and C(admin_user_enabled) is set + type: str + sample: pass1value + password2: + description: + - password2 value. + returned: when registry exists and C(admin_user_enabled) is set + type: str + sample: pass2value + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. 
class AzureRMContainerRegistryInfo(AzureRMModuleBase):
    """Facts module: collect information about Azure Container Registries.

    Lookup is by name (single registry), by resource group, or across the
    whole subscription, optionally filtered by tags.
    """

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
            ),
            name=dict(
                type='str'
            ),
            retrieve_credentials=dict(
                type='bool',
                default=False
            ),
            tags=dict(
                type='list',
                elements='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.name = None
        self.retrieve_credentials = False
        self.tags = None

        super(AzureRMContainerRegistryInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Dispatch to the right lookup based on the supplied parameters."""
        # BUGFIX: warn about the legacy name only when the module really was
        # invoked through its deprecated *_facts alias. The original compared
        # against the current *_info name, so the deprecation warning fired on
        # every normal run of this module.
        is_old_facts = self.module._name == 'azure_rm_containerregistry_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_containerregistry_facts' module has been renamed to 'azure_rm_containerregistry_info'", version=(2.9, ))

        for key in list(self.module_arg_spec) + ['tags']:
            setattr(self, key, kwargs[key])

        if self.name:
            self.results['registries'] = self.get()
        elif self.resource_group:
            self.results['registries'] = self.list_by_resource_group()
        else:
            self.results['registries'] = self.list_all()

        return self.results

    def get(self):
        """Return the named registry as a one-element list, or [] when absent.

        A missing registry is not an error for a facts module, so failures
        here are only logged, not fatal.
        """
        response = None
        results = []
        try:
            response = self.containerregistry_client.registries.get(resource_group_name=self.resource_group,
                                                                    registry_name=self.name)
            self.log("Response : {0}".format(response))
        except Exception as e:
            self.log("Could not get facts for Registries: {0}".format(str(e)))

        if response is not None:
            if self.has_tags(response.tags, self.tags):
                results.append(self.format_item(response))

        return results

    def list_all(self):
        """Return every registry in the subscription that matches the tag filter."""
        response = None
        results = []
        try:
            response = self.containerregistry_client.registries.list()
            self.log("Response : {0}".format(response))
        except Exception as e:
            self.fail("Could not get facts for Registries: {0}".format(str(e)))

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))
        return results

    def list_by_resource_group(self):
        """Return every registry in the resource group that matches the tag filter."""
        response = None
        results = []
        try:
            response = self.containerregistry_client.registries.list_by_resource_group(resource_group_name=self.resource_group)
            self.log("Response : {0}".format(response))
        except Exception as e:
            self.fail("Could not get facts for Registries: {0}".format(str(e)))

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))
        return results

    def format_item(self, item):
        """Flatten a Registry SDK model into the documented result dict.

        When I(retrieve_credentials) is set and the admin user is enabled on
        the registry, the admin username/passwords are fetched and flattened
        into the 'credentials' key.
        """
        d = item.as_dict()
        # The resource group is not a model attribute; parse it out of the ARM ID.
        resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
        name = d['name']
        credentials = {}
        admin_user_enabled = d['admin_user_enabled']

        if self.retrieve_credentials and admin_user_enabled:
            credentials = self.containerregistry_client.registries.list_credentials(resource_group_name=resource_group, registry_name=name).as_dict()
            # Flatten the passwords list into 'password'/'password2' keys.
            for index in range(len(credentials['passwords'])):
                password = credentials['passwords'][index]
                if password['name'] == 'password':
                    credentials['password'] = password['value']
                elif password['name'] == 'password2':
                    credentials['password2'] = password['value']
            credentials.pop('passwords')

        d = {
            'resource_group': resource_group,
            'name': d['name'],
            'location': d['location'],
            'admin_user_enabled': admin_user_enabled,
            # NOTE: reported as the lowercase SKU *tier* (e.g. 'premium'), not the SKU name.
            'sku': d['sku']['tier'].lower(),
            'provisioning_state': d['provisioning_state'],
            'login_server': d['login_server'],
            'id': d['id'],
            'tags': d.get('tags', None),
            'credentials': credentials
        }
        return d


def main():
    AzureRMContainerRegistryInfo()


if __name__ == '__main__':
    main()
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication.py new file mode 100644 index 000000000..115d55c29 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerregistryreplication +version_added: "0.1.2" +short_description: Manage Replication instance. +description: + - Create, update and delete instance of Replication. + +options: + resource_group: + description: + - The name of the resource group to which the container registry belongs. + required: True + registry_name: + description: + - The name of the container registry. + required: True + replication_name: + description: + - The name of the I(replication). + required: True + replication: + description: + - The parameters for creating a replication. + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - "Zim Kalinowski (@zikalino)" + +''' + +EXAMPLES = ''' + - name: Create (or update) Replication + azure_rm_containerregistryreplication: + resource_group: myResourceGroup + registry_name: myRegistry + replication_name: myReplication + replication: replication + location: eastus +''' + +RETURN = ''' +id: + description: + - The resource ID. 
def create_replication_dict(replication):
    """Flatten a Replication SDK model into the module's result dictionary.

    :param replication: azure.mgmt.containerregistry Replication model, or None
    :return: plain dict of the reported fields, or None when input is None
    """
    if replication is None:
        return None
    # ROBUSTNESS: 'status' can be unset while the replication is still
    # provisioning; guard before dereferencing display_status instead of
    # raising AttributeError.
    status = replication.status
    results = dict(
        id=replication.id,
        name=replication.name,
        location=replication.location,
        provisioning_state=replication.provisioning_state,
        tags=replication.tags,
        status=None if status is None else status.display_status
    )
    return results


class Actions:
    """Enumeration of the actions the module may decide to take."""
    NoAction, Create, Update, Delete = range(4)
def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if self.location is None: + self.location = resource_group.location + + old_response = self.get_replication() + + if not old_response: + self.log("Replication instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Replication instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if Replication instance has to be deleted or may be updated") + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Replication instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_replication() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Replication instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_replication() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_replication(): + time.sleep(20) + else: + self.log("Replication instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["status"] = response["status"] + + return self.results + + def create_update_replication(self): + ''' + Creates or updates Replication 
with the specified configuration. + + :return: deserialized Replication instance state dictionary + ''' + self.log("Creating / Updating the Replication instance {0}".format(self.replication_name)) + + try: + if self.to_do == Actions.Create: + replication = Replication( + location=self.location, + ) + response = self.containerregistry_client.replications.begin_create(resource_group_name=self.resource_group, + registry_name=self.registry_name, + replication_name=self.replication_name, + replication=replication) + else: + update_params = ReplicationUpdateParameters() + response = self.containerregistry_client.replications.begin_update(resource_group_name=self.resource_group, + registry_name=self.registry_name, + replication_name=self.replication_name, + replication_update_parameters=update_params) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Replication instance.') + self.fail("Error creating the Replication instance: {0}".format(str(exc))) + return create_replication_dict(response) + + def delete_replication(self): + ''' + Deletes specified Replication instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Replication instance {0}".format(self.replication_name)) + try: + response = self.containerregistry_client.replications.begin_delete(resource_group_name=self.resource_group, + registry_name=self.registry_name, + replication_name=self.replication_name) + self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to delete the Replication instance.') + self.fail("Error deleting the Replication instance: {0}".format(str(e))) + + return True + + def get_replication(self): + ''' + Gets the properties of the specified Replication. 
+ + :return: deserialized Replication instance state dictionary + ''' + self.log("Checking if the Replication instance {0} is present".format(self.replication_name)) + found = False + try: + response = self.containerregistry_client.replications.get(resource_group_name=self.resource_group, + registry_name=self.registry_name, + replication_name=self.replication_name) + found = True + self.log("Response : {0}".format(response)) + self.log("Replication instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the Replication instance: {0}'.format(str(e))) + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMReplications() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication_info.py new file mode 100644 index 000000000..123a83288 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistryreplication_info.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerregistryreplication_info +version_added: "0.0.1" +short_description: Get Replication facts. +description: + - Get facts of Replication. + +options: + resource_group: + description: + - The name of the resource group to which the container registry belongs. + required: True + registry_name: + description: + - The name of the container registry. + required: True + replication_name: + description: + - The name of the replication. 
+ required: True + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - "Zim Kalinowski (@zikalino)" + +''' + +EXAMPLES = ''' + - name: Get instance of Replication + azure_rm_containerregistryreplication_info: + resource_group: resource_group_name + registry_name: registry_name + replication_name: replication_name +''' + +RETURN = ''' +replications: + description: A list of dict results where the key is the name of the Replication and the values are the facts for that Replication. + returned: always + type: complex + contains: + replication_name: + description: The key is the name of the server that the values relate to. + type: complex + contains: + id: + description: + - The resource ID. + returned: always + type: str + sample: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registr + ies/myRegistry/replications/myReplication" + name: + description: + - The name of the resource. + returned: always + type: str + sample: myReplication + type: + description: + - The type of the resource. + returned: always + type: str + sample: Microsoft.ContainerRegistry/registries/replications + location: + description: + - The location of the resource. This cannot be changed after the resource is created. + returned: always + type: str + sample: eastus + status: + description: + - The status of the replication at the time the operation was called. + returned: always + type: complex + sample: status + contains: + message: + description: + - The detailed message for the status, including alerts and error messages. + returned: always + type: str + sample: "The replication is ready." + timestamp: + description: + - The timestamp when the status was changed to the current value. 
class AzureRMReplicationsFacts(AzureRMModuleBase):
    # Read-only facts module: looks up a single container registry replication.
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            registry_name=dict(
                type='str',
                required=True
            ),
            replication_name=dict(
                type='str',
                required=True
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False,
            ansible_facts=dict()
        )
        self.resource_group = None
        self.registry_name = None
        self.replication_name = None
        super(AzureRMReplicationsFacts, self).__init__(self.module_arg_spec, supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Copy the module parameters onto self and collect the facts."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # All three parameters are required=True, so this condition is always
        # satisfied in practice; kept as a defensive guard.
        if (self.resource_group is not None and
                self.registry_name is not None and
                self.replication_name is not None):
            self.results['replications'] = self.get()
        return self.results

    def get(self):
        '''
        Gets facts of the specified Replication.

        :return: deserialized Replication instance state dictionary, keyed by replication name (empty when not found)
        '''
        response = None
        results = {}
        try:
            response = self.containerregistry_client.replications.get(resource_group_name=self.resource_group,
                                                                      registry_name=self.registry_name,
                                                                      replication_name=self.replication_name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            # Absence is a normal outcome for a facts module: log, return {}.
            self.log('Could not get facts for Replications: {0}'.format(str(e)))

        if response is not None:
            results[response.name] = response.as_dict()

        return results


def main():
    AzureRMReplicationsFacts()


if __name__ == '__main__':
    main()
+ - If omitted when I(state=present), the name of the source tag will be used. + - If omitted when I(state=absent), the whole repository will be deleted. + type: str + source_image: + description: + - The source image detail. Required when I(state=present). + type: dict + suboptions: + registry_uri: + description: + - The address of the source registry. + type: str + repository: + description: + - Repository name of the source image. + type: str + required: true + name: + description: + - Name of the tag. + type: str + default: latest + credentials: + description: + - Credentials for the source registry. + type: dict + suboptions: + username: + description: + - Username for the source registry. + type: str + password: + description: + - Password for the source registry. + type: str + state: + description: + - State of the container registry tag. + - Use C(present) to create or update a container registry tag and use C(absent) to delete an container registry tag. + type: str + default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' +- name: Import a tag + azure_rm_containerregistrytag: + registry: myRegistry + source_image: + registry_uri: docker.io + repository: library/hello-world + name: latest + +- name: Import a tag to a different name + azure_rm_containerregistrytag: + registry: myRegistry + repository_name: app1 + name: v1 + source_image: + registry_uri: docker.io + repository: library/hello-world + name: latest + +- name: Delete all tags in repository + azure_rm_containerregistrytag: + registry: myRegistry + repository_name: myRepository + state: absent + +- name: Delete specific tag in repository + azure_rm_containerregistrytag: + registry: myRegistry + repository_name: myRepository + name: myTag + state: absent +''' + +RETURN = ''' +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import 
try:
    from azure.mgmt.containerregistry.models import ImportImageParameters, ImportSource, ImportSourceCredentials
    from azure.containerregistry import ContainerRegistryClient
except ImportError as exc:
    # This is handled in azure_rm_common
    pass


class Actions:
    # Enumeration of the actions the module may decide to take.
    NoAction, Import, DeleteRepo, DeleteTag = range(4)


class AzureRMContainerRegistryTag(AzureRMModuleBase):
    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type="str",
            ),
            registry=dict(
                type="str",
                required=True,
            ),
            repository_name=dict(
                type="str",
            ),
            name=dict(
                type="str",
            ),
            source_image=dict(
                type="dict",
                options=dict(
                    registry_uri=dict(
                        type="str",
                    ),
                    repository=dict(
                        type="str",
                        required=True,
                    ),
                    name=dict(
                        type="str",
                        default="latest",
                    ),
                    credentials=dict(
                        type="dict",
                        options=dict(
                            username=dict(type="str"),
                            password=dict(type="str", no_log=True),
                        )
                    ),
                ),
            ),
            state=dict(
                type="str",
                default="present",
                choices=["present", "absent"],
            )
        )

        # source_image is only meaningful for imports; repository_name is the
        # minimum needed to delete anything.
        required_if = [
            ("state", "present", ["source_image"]),
            ("state", "absent", ["repository_name"]),
        ]

        # 'changed' defaults to True; exec_module resets it to False when no
        # action turns out to be necessary.
        self.results = dict(
            changed=True
        )

        self.resource_group = None
        self.registry = None
        self.repository_name = None
        self.name = None
        self.source_image = None
        self.state = None

        # Data-plane client (ContainerRegistryClient), created in exec_module.
        self._client = None
        self._todo = Actions.NoAction

        super(AzureRMContainerRegistryTag, self).__init__(self.module_arg_spec,
                                                          supports_check_mode=True,
                                                          supports_tags=False,
                                                          facts_module=False,
                                                          required_if=required_if)

    def exec_module(self, **kwargs):
        """Decide which action (import / delete tag / delete repo) is needed and run it."""
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])

        self._client = self.get_client()

        if self.state == "present":
            # Destination repository/tag default to the source names.
            repo_name = self.repository_name if self.repository_name else self.source_image["repository"]
            tag_name = self.name if self.name else self.source_image["name"]
            tag = self.get_tag(repo_name, tag_name)
            if not tag:
                self._todo = Actions.Import
        elif self.state == "absent":
            if self.repository_name and self.name:
                # A specific tag was named: delete just that tag if it exists.
                tag = self.get_tag(self.repository_name, self.name)
                if tag:
                    self._todo = Actions.DeleteTag
            else:
                # No tag name given: delete the whole repository if it exists.
                repository = self.get_repository(self.repository_name)
                if repository:
                    self._todo = Actions.DeleteRepo

        if self._todo == Actions.Import:
            self.log("importing image into registry")
            if not self.check_mode:
                self.import_tag(self.repository_name, self.name, self.resource_group, self.registry, self.source_image)
        elif self._todo == Actions.DeleteTag:
            self.log(f"deleting tag {self.repository_name}:{self.name}")
            if not self.check_mode:
                self.delete_tag(self.repository_name, self.name)
        elif self._todo == Actions.DeleteRepo:
            self.log(f"deleting repository {self.repository_name}")
            if not self.check_mode:
                self.delete_repository(self.repository_name)
        else:
            self.log("no action")
            self.results["changed"] = False

        return self.results
    def get_client(self):
        """Build a data-plane ContainerRegistryClient for the target registry.

        NOTE(review): appending '.azurecr.io' and the hard-coded
        'https://management.azure.com' audience assume the Azure public
        cloud — verify behavior for sovereign/national clouds.
        """
        registry_endpoint = self.registry if self.registry.endswith(".azurecr.io") else self.registry + ".azurecr.io"
        return ContainerRegistryClient(
            endpoint=registry_endpoint,
            credential=self.azure_auth.azure_credential_track2,
            audience="https://management.azure.com",
        )

    def get_repository(self, repository_name):
        """Return the repository name if it exists in the registry, else None."""
        response = None
        try:
            response = self._client.get_repository_properties(repository=repository_name)
            self.log(f"Response : {response}")
        except Exception as e:
            # Any failure is treated as "not found"; the caller only needs existence.
            self.log(f"Could not get ACR repository for {repository_name} - {str(e)}")

        if response is not None:
            return response.name

        return None

    def get_tag(self, repository_name, tag_name):
        """Return the tag's properties object, or None when the tag does not exist."""
        response = None
        try:
            self.log(f"Getting tag for {repository_name}:{tag_name}")
            response = self._client.get_tag_properties(repository=repository_name, tag=tag_name)
            self.log(f"Response : {response}")
        except Exception as e:
            # Missing tags surface here as service errors; report absence as None.
            self.log(f"Could not get ACR tag for {repository_name}:{tag_name} - {str(e)}")

        return response
{repository_name}:{tag_name} - {str(e)}") + + return response + + def import_tag(self, repository, tag, resource_group, registry, source_image): + source_tag = get_tag(source_image["repository"], source_image["name"]) + dest_repo_name = repository if repository else source_image["repository"] + dest_tag_name = tag if tag else source_image["name"] + dest_tag = get_tag(dest_repo_name, dest_tag_name) + creds = None if not source_image["credentials"] else ImportSourceCredentials( + username=source_image["credentials"]["username"], + password=source_image["credentials"]["password"], + ) + params = ImportImageParameters( + target_tags=[dest_tag], + source=ImportSource( + registry_uri=source_image["registry_uri"], + source_image=source_tag, + credentials=creds, + ) + ) + try: + if not resource_group: + resource_group = self.get_registry_resource_group(registry) + + self.log(f"Importing {source_tag} as {dest_tag} to {registry} in {resource_group}") + poller = self.containerregistry_client.registries.begin_import_image(resource_group_name=resource_group, + registry_name=registry, + parameters=params) + self.get_poller_result(poller) + except Exception as e: + self.fail(f"Could not import {source_tag} as {dest_tag} to {registry} in {resource_group} - {str(e)}") + + def get_registry_resource_group(self, registry_name): + response = None + try: + response = self.containerregistry_client.registries.list() + except Exception as e: + self.fail(f"Could not load resource group for registry {registry_name} - {str(e)}") + + if response is not None: + for item in response: + item_dict = item.as_dict() + if item_dict["name"] == registry_name: + return azure_id_to_dict(item_dict["id"]).get("resourceGroups") + + return None + + def delete_repository(self, repository_name): + try: + self._client.delete_repository(repository=repository_name) + except Exception as e: + self.fail(f"Could not delete repository {repository_name} - {str(e)}") + + def delete_tag(self, repository_name, tag_name): 
+ try: + self._client.delete_tag(repository=repository_name, tag=tag_name) + except Exception as e: + self.fail(f"Could not delete tag {repository_name}:{tag_name} - {str(e)}") + + +def get_tag(repository, tag): + return repository if not tag else repository + ":" + tag + + +def main(): + AzureRMContainerRegistryTag() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrytag_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrytag_info.py new file mode 100644 index 000000000..678fac68f --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_containerregistrytag_info.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerregistrytag_info +version_added: "1.12.0" +short_description: Get Azure Container Registry tag facts +description: + - Get facts for Container Registry tags. + +options: + registry: + description: + - The name of the container registry. + type: str + required: true + repository_name: + description: + - Filter results for repository within the registry. If omitted, all repositories will be retrieved. + type: str + name: + description: + - Filter results by tags with a desired name. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' +- name: Get tags for all repositories in registry + azure_rm_containerregistrytag_info: + registry: myRegistry + +- name: List tags for a specific repository + azure_rm_containerregistrytag_info: + registry: myRegistry + repository_name: myRepository + +- name: List tags matching a name for a specific repository + azure_rm_containerregistrytag_info: + registry: myRegistry + repository_name: myRepository + name: myTag +''' + +RETURN = ''' +repositories: + description: + - A list of dictionaries containing facts for repositories. + returned: always + type: complex + contains: + name: + description: + - The name of the repository. + returned: always + type: str + sample: my-app + tags: + description: + - A list of dictionaries for the tags in the repository. + returned: always + type: complex + contains: + name: + description: + - Name of the tag. + type: str + returned: always + sample: my-tag + digest: + description: + - Digest of the tag. + type: str + returned: always + sample: sha256:7bd8fcb425afc34a7865f85868126e9c4fef5b2d6291986524687d289ab3a64a + created_on: + description: + - Datetime of when the tag was created. + type: str + returned: always + sample: "2022-02-02T18:18:57.145778+00:00" + last_updated_on: + description: + - Datetime of when the tag was last updated. 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.containerregistry import ContainerRegistryClient
    from azure.core.exceptions import ResourceNotFoundError
except ImportError as exc:
    # This is handled in azure_rm_common
    pass


class AzureRMContainerRegistryTagInfo(AzureRMModuleBase):
    """Facts module: list repositories and their tags in an Azure Container
    Registry, optionally filtered by repository and/or tag name."""

    def __init__(self):
        self.module_arg_spec = dict(
            registry=dict(
                type="str",
                required=True,
            ),
            repository_name=dict(
                type="str",
            ),
            name=dict(
                type="str",
            ),
        )

        self.results = dict(
            changed=False
        )

        self.registry = None
        self.repository_name = None
        self.name = None

        # Data-plane ACR client, created once per invocation.
        self._client = None

        super(AzureRMContainerRegistryTagInfo, self).__init__(self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=False,
                                                              facts_module=True)

    def exec_module(self, **kwargs):
        """Gather the requested tag facts and return the results dict."""
        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])

        self._client = self.get_client()

        # Three levels of specificity: exact tag, single repository, or
        # every repository in the registry.
        if self.repository_name and self.name:
            repositories = [self.get_tag(self.repository_name, self.name)]
        elif self.repository_name:
            repo_entry = self.list_by_repository(self.repository_name, self.name)
            repositories = [repo_entry] if repo_entry else []
        else:
            repositories = self.list_all_repositories(self.name)

        self.results["repositories"] = repositories
        return self.results

    def get_client(self):
        """Build a ContainerRegistryClient; accepts a bare registry name or a
        full *.azurecr.io endpoint."""
        endpoint = self.registry
        if not endpoint.endswith(".azurecr.io"):
            endpoint = endpoint + ".azurecr.io"
        return ContainerRegistryClient(
            endpoint=endpoint,
            credential=self.azure_auth.azure_credential_track2,
            audience="https://management.azure.com",
        )

    def get_tag(self, repository_name, tag_name):
        """Return one repository entry holding the single requested tag
        (empty tag list when the tag cannot be fetched)."""
        props = None
        try:
            props = self._client.get_tag_properties(repository=repository_name, tag=tag_name)
            self.log(f"Response : {props}")
        except Exception as e:
            self.log(f"Could not get ACR tag for {repository_name}:{tag_name} - {str(e)}")

        return {
            "name": repository_name,
            "tags": [] if props is None else [format_tag(props)],
        }

    def list_by_repository(self, repository_name, tag_name):
        """Return a repository entry with all its tags (optionally filtered by
        ``tag_name``), or None when the repository does not exist."""
        try:
            tag_props = self._client.list_tag_properties(repository=repository_name)
            self.log(f"Response : {tag_props}")
            matching = [format_tag(t) for t in tag_props if not tag_name or t.name == tag_name]
            return {
                "name": repository_name,
                "tags": matching
            }
        except ResourceNotFoundError as e:
            self.log(f"Could not get ACR tags for {repository_name} - {str(e)}")

        return None

    def list_all_repositories(self, tag_name):
        """Return entries for every repository in the registry, skipping any
        that disappear between listing and tag lookup."""
        repo_names = None
        try:
            repo_names = self._client.list_repository_names()
            self.log(f"Response : {repo_names}")
        except Exception as e:
            self.fail(f"Could not get ACR repositories - {str(e)}")

        if repo_names is None:
            return None

        collected = []
        for repo in repo_names:
            entry = self.list_by_repository(repo, tag_name)
            if entry:
                collected.append(entry)
        return collected


def format_tag(tag):
    """Map tag properties from the SDK object to a plain result dict."""
    return dict(
        name=tag.name,
        digest=tag.digest,
        created_on=tag.created_on,
        last_updated_on=tag.last_updated_on,
    )


def main():
    AzureRMContainerRegistryTagInfo()


if __name__ == "__main__":
    main()
type + + +DOCUMENTATION = ''' +--- +module: azure_rm_containerregistrywebhook +version_added: "0.1.2" +short_description: Manage Webhook instance. +description: + - Create, update and delete instance of Webhook. + +options: + resource_group: + description: + - The name of the resource group to which the container registry belongs. + required: True + registry_name: + description: + - The name of the container registry. + required: True + webhook_name: + description: + - The name of the webhook. + required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + service_uri: + description: + - The service URI for the webhook to post notifications. + custom_headers: + description: + - Custom headers that will be added to the webhook notifications. + status: + description: + - The status of the webhook at the time the operation was called. + choices: + - 'enabled' + - 'disabled' + scope: + description: + - "The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' m + eans events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events." + actions: + description: + - The list of actions that trigger the webhook to post notifications. + type: list + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - "Zim Kalinowski (@zikalino)" + +''' + +EXAMPLES = ''' + - name: Create (or update) Webhook + azure_rm_containerregistrywebhook: + resource_group: myResourceGroup + registry_name: myRegistry + webhook_name: myWebhook + location: eastus +''' + +RETURN = ''' +id: + description: + - The resource ID. 
import time
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.core.exceptions import ResourceNotFoundError
    from azure.core.polling import LROPoller
except ImportError:
    # This is handled in azure_rm_common
    pass


def create_webhook_dict(webhook):
    """Serialize a Webhook SDK object into a plain result dict (None in, None out)."""
    if webhook is None:
        return None
    results = dict(
        id=webhook.id,
        name=webhook.name,
        location=webhook.location,
        provisioning_state=webhook.provisioning_state,
        tags=webhook.tags,
        status=webhook.status
    )
    return results


class Actions:
    # Enumeration of the single action exec_module decides to perform.
    NoAction, Create, Update, Delete = range(4)


class AzureRMWebhooks(AzureRMModuleBase):
    """Configuration class for an Azure RM Webhook resource.

    Creates, updates or deletes a container-registry webhook via the
    management-plane client.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            registry_name=dict(
                type='str',
                required=True
            ),
            webhook_name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            service_uri=dict(
                type='str'
            ),
            custom_headers=dict(
                type='dict'
            ),
            status=dict(
                type='str',
                choices=['enabled',
                         'disabled']
            ),
            scope=dict(
                type='str'
            ),
            actions=dict(
                type='list'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.registry_name = None
        self.webhook_name = None
        # Accumulates the create/update parameters built from module args.
        self.parameters = dict()

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMWebhooks, self).__init__(derived_arg_spec=self.module_arg_spec,
                                              supports_check_mode=True,
                                              supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Identifier args become attributes; everything else feeds the
        # create/update parameters dict (only when supplied).
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "location":
                    self.parameters["location"] = kwargs[key]
                elif key == "service_uri":
                    self.parameters["service_uri"] = kwargs[key]
                elif key == "custom_headers":
                    self.parameters["custom_headers"] = kwargs[key]
                elif key == "status":
                    self.parameters["status"] = kwargs[key]
                elif key == "scope":
                    self.parameters["scope"] = kwargs[key]
                elif key == "actions":
                    self.parameters["actions"] = kwargs[key]

        old_response = None
        response = None

        resource_group = self.get_resource_group(self.resource_group)

        # Default location to the resource group's location.
        if 'location' not in self.parameters:
            self.parameters['location'] = resource_group.location

        old_response = self.get_webhook()

        if not old_response:
            self.log("Webhook instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Webhook instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Webhook instance has to be deleted or may be updated")
                self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Webhook instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_webhook()

            if not old_response:
                self.results['changed'] = True
            else:
                # FIX: use the != operator instead of calling __ne__ directly
                # (the dunder can return NotImplemented instead of a bool).
                self.results['changed'] = old_response != response
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Webhook instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_webhook()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_webhook():
                time.sleep(20)
        else:
            self.log("Webhook instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            self.results["status"] = response["status"]

        return self.results

    def create_update_webhook(self):
        '''
        Creates or updates Webhook with the specified configuration.

        :return: deserialized Webhook instance state dictionary
        '''
        self.log("Creating / Updating the Webhook instance {0}".format(self.webhook_name))

        try:
            if self.to_do == Actions.Create:
                response = self.containerregistry_client.webhooks.begin_create(resource_group_name=self.resource_group,
                                                                               registry_name=self.registry_name,
                                                                               webhook_name=self.webhook_name,
                                                                               webhook_create_parameters=self.parameters)
            else:
                response = self.containerregistry_client.webhooks.begin_update(resource_group_name=self.resource_group,
                                                                               registry_name=self.registry_name,
                                                                               webhook_name=self.webhook_name,
                                                                               webhook_update_parameters=self.parameters)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except Exception as exc:
            self.log('Error attempting to create the Webhook instance.')
            self.fail("Error creating the Webhook instance: {0}".format(str(exc)))
        return create_webhook_dict(response)

    def delete_webhook(self):
        '''
        Deletes specified Webhook instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Webhook instance {0}".format(self.webhook_name))
        try:
            response = self.containerregistry_client.webhooks.begin_delete(resource_group_name=self.resource_group,
                                                                           registry_name=self.registry_name,
                                                                           webhook_name=self.webhook_name)
            self.get_poller_result(response)
        except Exception as e:
            self.log('Error attempting to delete the Webhook instance.')
            self.fail("Error deleting the Webhook instance: {0}".format(str(e)))

        return True

    def get_webhook(self):
        '''
        Gets the properties of the specified Webhook.

        :return: deserialized Webhook instance state dictionary, or None when
                 the webhook does not exist
        '''
        self.log("Checking if the Webhook instance {0} is present".format(self.webhook_name))
        try:
            response = self.containerregistry_client.webhooks.get(resource_group_name=self.resource_group,
                                                                  registry_name=self.registry_name,
                                                                  webhook_name=self.webhook_name)
            self.log("Response : {0}".format(response))
            self.log("Webhook instance : {0} found".format(response.name))
            return response.as_dict()
        except ResourceNotFoundError as e:
            self.log('Did not find the Webhook instance: {0}'.format(str(e)))

        # FIX: return None (conventional "absent" sentinel) instead of False;
        # every caller only tests truthiness, so behavior is unchanged.
        return None


def main():
    """Main execution"""
    AzureRMWebhooks()


if __name__ == '__main__':
    main()
azure_rm_containerregistrywebhook_info +version_added: "0.1.2" +short_description: Get Webhook facts. +description: + - Get facts of Webhook. + +options: + resource_group: + description: + - The name of the resource group to which the container registry belongs. + required: True + registry_name: + description: + - The name of the container registry. + required: True + webhook_name: + description: + - The name of the webhook. + required: True + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - "Zim Kalinowski (@zikalino)" + +''' + +EXAMPLES = ''' + - name: Get instance of Webhook + azure_rm_containerregistrywebhook_info: + resource_group: resource_group_name + registry_name: registry_name + webhook_name: webhook_name +''' + +RETURN = ''' +webhooks: + description: A list of dict results where the key is the name of the Webhook and the values are the facts for that Webhook. + returned: always + type: complex + contains: + webhook_name: + description: The key is the name of the server that the values relate to. + type: complex + contains: + id: + description: + - The resource ID. + returned: always + type: str + sample: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registr + ies/myRegistry/webhooks/myWebhook" + name: + description: + - The name of the resource. + returned: always + type: str + sample: myWebhook + type: + description: + - The type of the resource. + returned: always + type: str + sample: Microsoft.ContainerRegistry/registries/webhooks + location: + description: + - The location of the resource. This cannot be changed after the resource is created. + returned: always + type: str + sample: westus + status: + description: + - "The status of the webhook at the time the operation was called. 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.core.exceptions import ResourceNotFoundError
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMWebhooksFacts(AzureRMModuleBase):
    """Facts module: fetch a single container-registry webhook by name."""

    def __init__(self):
        # All three identifiers are mandatory user inputs.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            registry_name=dict(
                type='str',
                required=True
            ),
            webhook_name=dict(
                type='str',
                required=True
            )
        )
        # Results of the module operation.
        self.results = dict(
            changed=False,
            ansible_facts=dict()
        )
        self.resource_group = None
        self.registry_name = None
        self.webhook_name = None
        super(AzureRMWebhooksFacts, self).__init__(self.module_arg_spec, supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Collect webhook facts and return the results dict."""
        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])

        identifiers = (self.resource_group, self.registry_name, self.webhook_name)
        if all(value is not None for value in identifiers):
            self.results['webhooks'] = self.get()
        return self.results

    def get(self):
        '''
        Gets facts of the specified Webhook.

        :return: dict mapping the webhook name to its deserialized state;
                 empty when the webhook does not exist
        '''
        webhook = None
        try:
            webhook = self.containerregistry_client.webhooks.get(resource_group_name=self.resource_group,
                                                                 registry_name=self.registry_name,
                                                                 webhook_name=self.webhook_name)
            self.log("Response : {0}".format(webhook))
        except ResourceNotFoundError as e:
            self.log('Could not get facts for Webhooks: {0}'.format(str(e)))

        if webhook is None:
            return {}
        return {webhook.name: webhook.as_dict()}


def main():
    AzureRMWebhooksFacts()


if __name__ == '__main__':
    main()
+ choices: + - 'global_document_db' + - 'mongo_db' + - 'parse' + consistency_policy: + description: + - The consistency policy for the Cosmos DB account. + suboptions: + default_consistency_level: + description: + - The default consistency level and configuration settings of the Cosmos DB account. + - Required when I(state=present). + choices: + - 'eventual' + - 'session' + - 'bounded_staleness' + - 'strong' + - 'consistent_prefix' + max_staleness_prefix: + description: + - When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated. + - Accepted range for this value is 1 - 2,147,483,647. Required when I(default_consistency_policy=bounded_staleness). + type: int + max_interval_in_seconds: + description: + - When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated. + - Accepted range for this value is 5 - 86400. Required when I(default_consistency_policy=bounded_staleness). + type: int + geo_rep_locations: + description: + - An array that contains the georeplication locations enabled for the Cosmos DB account. + - Required when I(state=present). + type: list + suboptions: + name: + description: + - The name of the region. + failover_priority: + description: + - The failover priority of the region. A failover priority of 0 indicates a write region. + - The maximum value for a failover priority = (total number of regions - 1). + - Failover priority values must be unique for each of the regions in which the database account exists. + type: int + database_account_offer_type: + description: + - Database account offer type, for example I(Standard) + - Required when I(state=present). + enable_free_tier: + description: + - If enabled the account is free-tier. + type: bool + default: false + version_added: "1.10.0" + ip_range_filter: + description: + - (deprecated) Cosmos DB Firewall support. 
This value specifies the set of IP addresses or IP address ranges. + - In CIDR form to be included as the allowed list of client IPs for a given database account. + - IP addresses/ranges must be comma separated and must not contain any spaces. + - This value has been deprecated, and will be removed in a later version. Use I(ip_rules) instead. + ip_rules: + description: + - The IP addresses or IP address ranges in CIDR form to be included as the allowed list of client IPs. + type: list + elements: str + version_added: "1.10.0" + is_virtual_network_filter_enabled: + description: + - Flag to indicate whether to enable/disable Virtual Network ACL rules. + type: bool + enable_automatic_failover: + description: + - Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage. + - Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account. + type: bool + enable_cassandra: + description: + - Enable Cassandra. + type: bool + enable_table: + description: + - Enable Table. + type: bool + enable_gremlin: + description: + - Enable Gremlin. + type: bool + mongo_version: + description: + - Server version for the MongoDB account, such as c(3.2) or c(4.0). + - Only used when c(kind) = i(mongo_db). + type: str + version_added: "1.10.0" + public_network_access: + description: + - Enables or disables public network access to server. + type: str + default: Enabled + choices: + - Enabled + - Disabled + version_added: "1.10.0" + virtual_network_rules: + description: + - List of Virtual Network ACL rules configured for the Cosmos DB account. + type: list + suboptions: + subnet: + description: + - It can be a string containing resource id of a subnet. 
+ - It can be a dictionary containing 'resource_group', 'virtual_network_name' and 'subnet_name' + ignore_missing_v_net_service_endpoint: + description: + - Create Cosmos DB account without existing virtual network service endpoint. + type: bool + enable_multiple_write_locations: + description: + - Enables the account to write in multiple locations + type: bool + state: + description: + - Assert the state of the Database Account. + - Use C(present) to create or update an Database Account and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create Cosmos DB Account - min + azure_rm_cosmosdbaccount: + resource_group: myResourceGroup + name: myDatabaseAccount + location: westus + geo_rep_locations: + - name: southcentralus + failover_priority: 0 + database_account_offer_type: Standard + + - name: Create Cosmos DB Account - max + azure_rm_cosmosdbaccount: + resource_group: myResourceGroup + name: myDatabaseAccount + location: westus + kind: mongo_db + geo_rep_locations: + - name: southcentralus + failover_priority: 0 + database_account_offer_type: Standard + ip_rules: + - 10.10.10.10 + enable_multiple_write_locations: yes + virtual_network_rules: + - subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVi + rtualNetwork/subnets/mySubnet" + consistency_policy: + default_consistency_level: bounded_staleness + max_staleness_prefix: 10 + max_interval_in_seconds: 1000 +''' + +RETURN = ''' +id: + description: + - The unique resource identifier of the database account. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccounts/myData + baseAccount" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.cosmosdb import CosmosDBManagementClient + from ansible.module_utils.six import string_types +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMCosmosDBAccount(AzureRMModuleBase): + """Configuration class for an Azure RM Database Account resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + kind=dict( + type='str', + choices=['global_document_db', + 'mongo_db', + 'parse'] + ), + consistency_policy=dict( + type='dict', + options=dict( + default_consistency_level=dict( + type='str', + choices=['eventual', + 'session', + 'bounded_staleness', + 'strong', + 'consistent_prefix'] + ), + max_staleness_prefix=dict( + type='int' + ), + max_interval_in_seconds=dict( + type='int' + ) + ) + ), + geo_rep_locations=dict( + type='list', + options=dict( + name=dict( + type='str', + required=True + ), + failover_priority=dict( + type='int', + required=True + ) + ) + ), + database_account_offer_type=dict( + type='str' + ), + enable_free_tier=dict( + type='bool', + default=False, + ), + ip_range_filter=dict( + type='str' + ), + ip_rules=dict( + type='list', + elements='str', + ), + is_virtual_network_filter_enabled=dict( + type='bool' + ), + 
enable_automatic_failover=dict( + type='bool' + ), + enable_cassandra=dict( + type='bool' + ), + enable_table=dict( + type='bool' + ), + enable_gremlin=dict( + type='bool' + ), + mongo_version=dict( + type='str' + ), + public_network_access=dict( + type='str', + default='Enabled', + choices=['Enabled', 'Disabled'] + ), + virtual_network_rules=dict( + type='list', + options=dict( + id=dict( + type='str', + required=True + ), + ignore_missing_v_net_service_endpoint=dict( + type='bool' + ) + ) + ), + enable_multiple_write_locations=dict( + type='bool' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMCosmosDBAccount, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.parameters[key] = kwargs[key] + + kind = self.parameters.get('kind') + if kind == 'global_document_db': + self.parameters['kind'] = 'GlobalDocumentDB' + elif kind == 'mongo_db': + self.parameters['kind'] = 'MongoDB' + elif kind == 'parse': + self.parameters['kind'] = 'Parse' + + ip_range_filter = self.parameters.pop('ip_range_filter', None) + ip_rules = self.parameters.pop('ip_rules', []) + if ip_range_filter: + self.parameters['ip_rules'] = [{"ip_address_or_range": ip} for ip in ip_range_filter.split(",")] + if ip_rules: + # overrides deprecated 'ip_range_filter' parameter + self.parameters['ip_rules'] = [{"ip_address_or_range": ip} for ip in ip_rules] + + dict_camelize(self.parameters, ['consistency_policy', 'default_consistency_level'], True) + 
dict_rename(self.parameters, ['geo_rep_locations', 'name'], 'location_name') + dict_rename(self.parameters, ['geo_rep_locations'], 'locations') + self.parameters['capabilities'] = [] + if self.parameters.pop('enable_cassandra', False): + self.parameters['capabilities'].append({'name': 'EnableCassandra'}) + if self.parameters.pop('enable_table', False): + self.parameters['capabilities'].append({'name': 'EnableTable'}) + if self.parameters.pop('enable_gremlin', False): + self.parameters['capabilities'].append({'name': 'EnableGremlin'}) + + mongo_version = self.parameters.pop('mongo_version', None) + if kind == 'mongo_db' and mongo_version is not None: + self.parameters['api_properties'] = dict() + self.parameters['api_properties']['server_version'] = mongo_version + + for rule in self.parameters.get('virtual_network_rules', []): + subnet = rule.pop('subnet') + if isinstance(subnet, dict): + virtual_network_name = subnet.get('virtual_network_name') + subnet_name = subnet.get('subnet_name') + resource_group_name = subnet.get('resource_group', self.resource_group) + template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}" + subnet = template.format(self.subscription_id, resource_group_name, virtual_network_name, subnet_name) + rule['id'] = subnet + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(CosmosDBManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_databaseaccount() + + if not old_response: + self.log("Database Account instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Database Account instance already exists") + if self.state == 'absent': + self.to_do = 
Actions.Delete + elif self.state == 'present': + old_response['locations'] = old_response['failover_policies'] + if not default_compare(self.parameters, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Database Account instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_databaseaccount() + + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Database Account instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_databaseaccount() + else: + self.log("Database Account instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({'id': response.get('id', None)}) + return self.results + + def create_update_databaseaccount(self): + ''' + Creates or updates Database Account with the specified configuration. + + :return: deserialized Database Account instance state dictionary + ''' + self.log("Creating / Updating the Database Account instance {0}".format(self.name)) + + try: + response = self.mgmt_client.database_accounts.begin_create_or_update(resource_group_name=self.resource_group, + account_name=self.name, + create_update_parameters=self.parameters) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Database Account instance.') + self.fail("Error creating the Database Account instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_databaseaccount(self): + ''' + Deletes specified Database Account instance in the specified subscription and resource group. 
def default_compare(new, old, path, result):
    '''
    Recursively compare the requested configuration (*new*) against the
    existing resource state (*old*).

    Only keys present in *new* are inspected, so properties the user did not
    specify never trigger an update.

    :return: False if differences are found between old and new; a human
             readable description of the first difference is stored under
             result['compare'].
    '''
    if new is None:
        # Caller did not request a value for this property -> no change.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        elif len(old) == 0:
            return True
        elif isinstance(old[0], dict):
            # Sort both sides on a shared key so ordering differences do not
            # register as a change.
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, ''))
            old = sorted(old, key=lambda x: x.get(key, ''))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location' or path.endswith('location_name'):
            # Azure reports locations like "East US"; normalize both sides.
            # BUGFIX: the old value used to be overwritten with the new one
            # ("old = new.replace(...)"), which made every location comparison
            # succeed unconditionally and masked real location changes.  Guard
            # on str so a missing old location compares unequal instead of
            # raising.
            if isinstance(new, str) and isinstance(old, str):
                new = new.replace(' ', '').lower()
                old = old.replace(' ', '').lower()
        if isinstance(old, str) and isinstance(new, str):
            # String comparison is case-insensitive throughout this module.
            new = new.lower()
            old = old.lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False


def dict_camelize(d, path, camelize_first):
    '''Convert the snake_case value found at *path* inside *d* to CamelCase in place.'''
    if isinstance(d, list):
        for i in range(len(d)):
            dict_camelize(d[i], path, camelize_first)
    elif isinstance(d, dict):
        if len(path) == 1:
            old_value = d.get(path[0], None)
            if old_value is not None:
                d[path[0]] = _snake_to_camel(old_value, camelize_first)
        else:
            sd = d.get(path[0], None)
            if sd is not None:
                dict_camelize(sd, path[1:], camelize_first)


def dict_upper(d, path):
    '''Upper-case the string value found at *path* inside *d* in place.'''
    if isinstance(d, list):
        for i in range(len(d)):
            dict_upper(d[i], path)
    elif isinstance(d, dict):
        if len(path) == 1:
            old_value = d.get(path[0], None)
            if old_value is not None:
                d[path[0]] = old_value.upper()
        else:
            sd = d.get(path[0], None)
            if sd is not None:
                dict_upper(sd, path[1:])


def dict_rename(d, path, new_name):
    '''Rename the key at *path* inside *d* to *new_name* in place.'''
    if isinstance(d, list):
        for i in range(len(d)):
            dict_rename(d[i], path, new_name)
    elif isinstance(d, dict):
        if len(path) == 1:
            old_value = d.pop(path[0], None)
            if old_value is not None:
                d[new_name] = old_value
        else:
            sd = d.get(path[0], None)
            if sd is not None:
                dict_rename(sd, path[1:], new_name)


def dict_expand(d, path, outer_dict_name):
    '''Move the value at *path* inside *d* under the key *outer_dict_name* in place.'''
    if isinstance(d, list):
        for i in range(len(d)):
            dict_expand(d[i], path, outer_dict_name)
    elif isinstance(d, dict):
        if len(path) == 1:
            old_value = d.pop(path[0], None)
            if old_value is not None:
                # (removed a dead intermediate assignment; the value is simply
                # re-homed under the outer key)
                d[outer_dict_name] = old_value
        else:
            sd = d.get(path[0], None)
            if sd is not None:
                dict_expand(sd, path[1:], outer_dict_name)


def main():
    """Main execution"""
    AzureRMCosmosDBAccount()


if __name__ == '__main__':
    main()
+ name: + description: + - Cosmos DB database account name. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + retrieve_keys: + description: + - Retrieve keys and connection strings. + type: str + choices: + - all + - readonly + retrieve_connection_strings: + description: + - Retrieve connection strings. + type: bool + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Database Account + azure_rm_cosmosdbaccount_info: + resource_group: myResourceGroup + name: testaccount + + - name: List instances of Database Account + azure_rm_cosmosdbaccount_info: + resource_group: myResourceGroup + tags: + - key + - key:value +''' + +RETURN = ''' +accounts: + description: A list of dictionaries containing facts for Database Account. + returned: always + type: complex + contains: + id: + description: + - The unique resource identifier of the database account. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccount + s/testaccount" + resource_group: + description: + - Name of an Azure resource group. + returned: always + type: str + sample: myResourceGroup + name: + description: + - The name of the database account. + returned: always + type: str + sample: testaccount + location: + description: + - The location of the resource group to which the resource belongs. + returned: always + type: str + sample: westus + kind: + description: + - Indicates the type of database account. + returned: always + type: str + sample: global_document_db + consistency_policy: + description: + - Consistency policy. + returned: always + type: complex + contains: + default_consistency_level: + description: + - Default consistency level. 
+ returned: always + type: str + sample: session + max_interval_in_seconds: + description: + - Maximum interval in seconds. + returned: always + type: int + sample: 5 + max_staleness_prefix: + description: + - Maximum staleness prefix. + returned: always + type: int + sample: 100 + failover_policies: + description: + - The list of new failover policies for the failover priority change. + returned: always + type: complex + contains: + name: + description: + - Location name. + returned: always + type: str + sample: eastus + failover_priority: + description: + - Failover priority. + returned: always + type: int + sample: 0 + id: + description: + - Read location ID. + returned: always + type: str + sample: testaccount-eastus + read_locations: + description: + - Read locations. + returned: always + type: complex + contains: + name: + description: + - Location name. + returned: always + type: str + sample: eastus + failover_priority: + description: + - Failover priority. + returned: always + type: int + sample: 0 + id: + description: + - Read location ID. + returned: always + type: str + sample: testaccount-eastus + document_endpoint: + description: + - Document endpoint. + returned: always + type: str + sample: https://testaccount-eastus.documents.azure.com:443/ + provisioning_state: + description: + - Provisioning state. + returned: always + type: str + sample: Succeeded + write_locations: + description: + - Write locations. + returned: always + type: complex + contains: + name: + description: + - Location name. + returned: always + type: str + sample: eastus + failover_priority: + description: + - Failover priority. + returned: always + type: int + sample: 0 + id: + description: + - Read location ID. + returned: always + type: str + sample: testaccount-eastus + document_endpoint: + description: + - Document endpoint. + returned: always + type: str + sample: https://testaccount-eastus.documents.azure.com:443/ + provisioning_state: + description: + - Provisioning state. 
+ returned: always + type: str + sample: Succeeded + database_account_offer_type: + description: + - Offer type. + returned: always + type: str + sample: Standard + enable_free_tier: + description: + - If enabled the account is free-tier. + returned: always + type: bool + sample: true + version_added: "1.10.0" + ip_range_filter: + description: + - (deprecated) Enabled IP range filter. + - This value has been deprecated, and will be removed in a later version. Use c(ip_rules) instead. + returned: always + type: str + sample: 10.10.10.10 + ip_rules: + description: + - The IP addresses or IP address ranges in CIDR form included as the allowed list of client IPs. + returned: always + type: list + sample: ["10.10.10.10", "20.20.20.20/28"] + version_added: "1.10.0" + is_virtual_network_filter_enabled: + description: + - Enable virtual network filter. + returned: always + type: bool + sample: true + enable_automatic_failover: + description: + - Enable automatic failover. + returned: always + type: bool + sample: true + enable_cassandra: + description: + - Enable Cassandra. + returned: always + type: bool + sample: true + enable_table: + description: + - Enable Table. + returned: always + type: bool + sample: true + enable_gremlin: + description: + - Enable Gremlin. + returned: always + type: bool + sample: true + mongo_version: + description: + - Server version for the MongoDB account. + - Only used for c(kind) = i(mongo_db); otherwise value is null/none. + returned: always + type: str + sample: "4.0" + version_added: "1.10.0" + public_network_access: + description: + - If public network access is allowed to the server. + returned: always + type: str + sample: Enabled + version_added: "1.10.0" + virtual_network_rules: + description: + - List of Virtual Network ACL rules configured for the Cosmos DB account. + type: list + contains: + subnet: + description: + - Resource id of a subnet. 
+ type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNet + works/testvnet/subnets/testsubnet1" + ignore_missing_v_net_service_endpoint: + description: + - Create Cosmos DB account without existing virtual network service endpoint. + type: bool + enable_multiple_write_locations: + description: + - Enable multiple write locations. + returned: always + type: bool + sample: true + document_endpoint: + description: + - Document endpoint. + returned: always + type: str + sample: https://testaccount.documents.azure.com:443/ + provisioning_state: + description: + - Provisioning state of Cosmos DB. + returned: always + type: str + sample: Succeeded + primary_master_key: + description: + - Primary master key. + returned: when requested + type: str + sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + secondary_master_key: + description: + - Primary master key. + returned: when requested + type: str + sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + primary_readonly_master_key: + description: + - Primary master key. + returned: when requested + type: str + sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + secondary_readonly_master_key: + description: + - Primary master key. + returned: when requested + type: str + sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + connection_strings: + description: + - List of connection strings. + type: list + returned: when requested + contains: + connection_string: + description: + - Description of connection string. + type: str + returned: always + sample: Primary SQL Connection String + description: + description: + - Connection string. 
+ type: str + returned: always + sample: "AccountEndpoint=https://testaccount.documents.azure.com:443/;AccountKey=xxxxx" + tags: + description: + - Tags assigned to the resource. Dictionary of "string":"string" pairs. + returned: always + type: dict + sample: { "tag1":"abc" } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.cosmosdb import CosmosDBManagementClient +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMCosmosDBAccountInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ), + retrieve_keys=dict( + type='str', + choices=['all', 'readonly'] + ), + retrieve_connection_strings=dict( + type='bool' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.name = None + self.tags = None + self.retrieve_keys = None + self.retrieve_connection_strings = None + + super(AzureRMCosmosDBAccountInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_cosmosdbaccount_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_cosmosdbaccount_facts' module has been renamed to 'azure_rm_cosmosdbaccount_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(CosmosDBManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name is not None: + 
self.results['accounts'] = self.get() + elif self.resource_group is not None: + self.results['accounts'] = self.list_by_resource_group() + else: + self.results['accounts'] = self.list_all() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group, + account_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Database Account.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.mgmt_client.database_accounts.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Database Account.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def list_all(self): + response = None + results = [] + try: + response = self.mgmt_client.database_accounts.list() + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Database Account.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'id': d.get('id'), + 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'), + 'name': d.get('name', None), + 'location': d.get('location', '').replace(' ', '').lower(), + 'kind': _camel_to_snake(d.get('kind', None)), + 'consistency_policy': {'default_consistency_level': _camel_to_snake(d['consistency_policy']['default_consistency_level']), + 
'max_interval_in_seconds': d['consistency_policy']['max_interval_in_seconds'], + 'max_staleness_prefix': d['consistency_policy']['max_staleness_prefix']}, + 'failover_policies': [{'name': fp['location_name'].replace(' ', '').lower(), + 'failover_priority': fp['failover_priority'], + 'id': fp['id']} for fp in d['failover_policies']], + 'read_locations': [{'name': rl['location_name'].replace(' ', '').lower(), + 'failover_priority': rl['failover_priority'], + 'id': rl['id'], + 'document_endpoint': rl['document_endpoint'], + 'provisioning_state': rl['provisioning_state']} for rl in d['read_locations']], + 'write_locations': [{'name': wl['location_name'].replace(' ', '').lower(), + 'failover_priority': wl['failover_priority'], + 'id': wl['id'], + 'document_endpoint': wl['document_endpoint'], + 'provisioning_state': wl['provisioning_state']} for wl in d['write_locations']], + 'database_account_offer_type': d.get('database_account_offer_type'), + 'enable_free_tier': d.get('enable_free_tier'), + 'ip_rules': [ip['ip_address_or_range'] for ip in d.get('ip_rules', [])], + 'ip_range_filter': ",".join([ip['ip_address_or_range'] for ip in d.get('ip_rules', [])]), + 'is_virtual_network_filter_enabled': d.get('is_virtual_network_filter_enabled'), + 'enable_automatic_failover': d.get('enable_automatic_failover'), + 'enable_cassandra': 'EnableCassandra' in d.get('capabilities', []), + 'enable_table': 'EnableTable' in d.get('capabilities', []), + 'enable_gremlin': 'EnableGremlin' in d.get('capabilities', []), + 'mongo_version': d.get('api_properties', {}).get('server_version'), + 'public_network_access': d.get('public_network_access'), + 'virtual_network_rules': d.get('virtual_network_rules'), + 'enable_multiple_write_locations': d.get('enable_multiple_write_locations'), + 'document_endpoint': d.get('document_endpoint'), + 'provisioning_state': d.get('provisioning_state'), + 'tags': d.get('tags', None) + } + + if self.retrieve_keys == 'all': + keys = 
self.mgmt_client.database_accounts.list_keys(resource_group_name=self.resource_group, + account_name=self.name) + d['primary_master_key'] = keys.primary_master_key + d['secondary_master_key'] = keys.secondary_master_key + d['primary_readonly_master_key'] = keys.primary_readonly_master_key + d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key + elif self.retrieve_keys == 'readonly': + keys = self.mgmt_client.database_accounts.get_read_only_keys(resource_group_name=self.resource_group, + account_name=self.name) + d['primary_readonly_master_key'] = keys.primary_readonly_master_key + d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key + if self.retrieve_connection_strings: + connection_strings = self.mgmt_client.database_accounts.list_connection_strings(resource_group_name=self.resource_group, + account_name=self.name) + d['connection_strings'] = connection_strings.as_dict() + return d + + +def main(): + AzureRMCosmosDBAccountInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory.py new file mode 100644 index 000000000..66e75f0c8 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory.py @@ -0,0 +1,429 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Fred-sun, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_datafactory + +version_added: "0.1.12" + +short_description: Managed data factory + +description: + - Create, update or delete data factory. + +options: + name: + description: + - The factory name. + type: str + required: true + resource_group: + description: + - Limit results by resource group. Required when using name parameter. 
+ type: str + required: true + if_match: + description: + - ETag of the factory entity. + - Should only be specified for get. + - If the ETag matches the existing entity tag, or if * was provided, then no content will be returned. + type: str + repo_configuration: + description: + - The data factory repo configration. + type: dict + suboptions: + type: + description: + - Type of repo configuration. + type: str + required: True + choices: + - FactoryGitHubConfiguration + - FactoryVSTSConfiguration + account_name: + description: + - Account name. + type: str + required: True + collaboration_branch: + description: + - Collaboration branch. + type: str + required: True + root_folder: + description: + - Root folder. + type: str + required: True + repository_name: + description: + - Repository name. + type: str + required: True + project_name: + description: + - VSTS project name. + - Required when I(type=FactoryVSTSConfiguration). + type: str + location: + description: + - Valid Azure location. Defaults to location of the resource group. + type: str + public_network_access: + description: + - Whether or not public network access is allowed for the data factory. + type: str + choices: + - Enabled + - Disabled + state: + description: + - Assert the state of the Public IP. Use C(present) to create or update a and C(absent) to delete. + default: present + choices: + - absent + - present + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Fred-sun (@Fred-sun) + - xuzhang3 (@xuzhang3) +''' + +EXAMPLES = ''' +- name: Create the data factory + azure_rm_datafactory: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + repo_configuration: + type: FactoryGitHubConfiguration + account_name: Fred-sun + collaboration_branch: testbranch + root_folder: "./" + repository_name: vault + +''' + +RETURN = ''' +state: + description: + - Current state fo the data factory. 
+ returned: always + type: complex + contains: + id: + description: + - The data facotry ID. + type: str + returned: always + sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.DataFactory/factories/testpro" + create_time: + description: + - Time the factory was created in ISO8601 format. + type: str + returned: always + sample: "2022-04-26T08:24:41.391164+00:00" + location: + description: + - The resource location. + type: str + returned: always + sample: eastus + name: + description: + - The resource name. + type: str + returned: always + sample: testfactory + provisioning_state: + description: + - Factory provisioning state, example Succeeded. + type: str + returned: always + sample: Succeeded + e_tag: + description: + - Etag identifies change in the resource. + type: str + returned: always + sample: "3000fa80-0000-0100-0000-6267ac490000" + type: + description: + - The resource type. + type: str + returned: always + sample: "Microsoft.DataFactory/factories" + public_network_access: + description: + - Whether or not public network access is allowed for the data factory. + type: str + returned: always + sample: "Enabled" + tags: + description: + - List the data factory tags. + type: str + returned: always + sample: {'key1':'value1'} + identity: + description: + - Managed service identity of the factory. + type: str + returned: always + contains: + principal_id: + description: + - The principal id of the identity. + type: str + returned: always + sample: "***********" + tenant_id: + description: + - The client tenant id of the identity. + type: str + returned: always + sample: "***********" + repo_configuration: + description: + - Git repo information of the factory. + type: str + returned: always + contains: + type: + description: + - Type of repo configuration. + type: str + returned: always + sample: FactoryGitHubConfiguration + ccount_name: + description: + - Account name. 
+ type: str + returned: always + sample: fredaccount + collaboration_branch: + description: + - Collaboration branch. + type: str + returned: always + sample: branch + repository_name: + description: + - Repository name. + type: str + returned: always + sample: "vault" + root_folder: + description: + - Root folder. + type: str + returned: always + sample: "/home/" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +AZURE_OBJECT_CLASS = 'DataFactory' + +repo_configuration_spec = dict( + type=dict(type='str', required=True, choices=['FactoryVSTSConfiguration', 'FactoryGitHubConfiguration']), + account_name=dict(type='str', required=True), + repository_name=dict(type='str', required=True), + collaboration_branch=dict(type='str', required=True), + root_folder=dict(type='str', required=True), + project_name=dict(type='str'), +) + + +class AzureRMDataFactory(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + if_match=dict(type='str'), + location=dict(type='str'), + public_network_access=dict(type='str', choices=["Enabled", "Disabled"]), + state=dict(type='str', default='present', choices=['absent', 'present']), + repo_configuration=dict(type='dict', options=repo_configuration_spec), + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + self.if_match = None + self.location = None + self.tags = None + self.public_network_access = None + self.repo_configuration = None + + super(AzureRMDataFactory, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + facts_module=False) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, 
key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + response = self.get_item() + changed = False + if self.state == 'present': + if response: + if self.tags is not None: + update_tags, tags = self.update_tags(response['tags']) + if update_tags: + changed = True + self.tags = tags + if self.public_network_access is not None and self.public_network_access != response['public_network_access']: + changed = True + else: + self.public_network_access = response['public_network_access'] + + if self.repo_configuration is not None and self.repo_configuration != response['repo_configuration']: + changed = True + else: + self.repo_configuration = response['repo_configuration'] + else: + changed = True + + if self.check_mode: + changed = True + self.log("Check mode test, Data factory will be create or update") + else: + if changed: + if self.repo_configuration: + if self.repo_configuration['type'] == 'FactoryGitHubConfiguration': + repo_parameters = self.datafactory_model.FactoryGitHubConfiguration( + account_name=self.repo_configuration.get('account_name'), + repository_name=self.repo_configuration.get('repository_name'), + collaboration_branch=self.repo_configuration.get('collaboration_branch'), + root_folder=self.repo_configuration.get('root_folder') + ) + else: + repo_parameters = self.datafactory_model.FactoryVSTSConfiguration( + account_name=self.repo_configuration.get('account_name'), + repository_name=self.repo_configuration.get('repository_name'), + collaboration_branch=self.repo_configuration.get('collaboration_branch'), + root_folder=self.repo_configuration.get('root_folder'), + project_name=self.repo_configuration.get('project_name'), + ) + else: + repo_parameters = None + + update_parameters = self.datafactory_model.Factory( + location=self.location, + tags=self.tags, + public_network_access=self.public_network_access, + repo_configuration=repo_parameters + ) + + 
response = self.create_or_update(update_parameters) + + else: + if self.check_mode: + changed = True + self.log("Check mode test") + if response: + self.log("The Data factory {0} exist, will be deleted".format(self.name)) + changed = True + response = self.delete() + else: + changed = False + + self.results['changed'] = changed + self.results['state'] = response + return self.results + + def get_item(self): + response = None + self.log('Get properties for {0}'.format(self.name)) + try: + response = self.datafactory_client.factories.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + return self.pip_to_dict(response) if response else None + + def delete(self): + response = None + self.log('Delete data factory for {0}'.format(self.name)) + try: + response = self.datafactory_client.factories.delete(self.resource_group, self.name) + except Exception as ec: + self.fail("Delete fail {0}, error message {1}".format(self.name, ec)) + return self.pip_to_dict(response) if response else None + + def create_or_update(self, parameters): + response = None + self.log('Create data factory for {0}'.format(self.name)) + try: + response = self.datafactory_client.factories.create_or_update(self.resource_group, + self.name, + parameters, + self.if_match) + except Exception as ec: + self.fail("Create fail {0}, error message {1}".format(self.name, ec)) + return self.pip_to_dict(response) if response else None + + def pip_to_dict(self, pip): + result = dict( + id=pip.id, + name=pip.name, + type=pip.type, + location=pip.location, + tags=pip.tags, + e_tag=pip.e_tag, + provisioning_state=pip.provisioning_state, + create_time=pip.create_time, + public_network_access=pip.public_network_access, + repo_configuration=dict(), + identity=dict() + ) + if pip.identity: + result['identity']['principal_id'] = pip.identity.principal_id + result['identity']['tenant_id'] = pip.identity.tenant_id + if pip.repo_configuration: + result['repo_configuration']['account_name'] = 
pip.repo_configuration.account_name + result['repo_configuration']['repository_name'] = pip.repo_configuration.repository_name + result['repo_configuration']['collaboration_branch'] = pip.repo_configuration.collaboration_branch + result['repo_configuration']['root_folder'] = pip.repo_configuration.root_folder + result['repo_configuration']['type'] = pip.repo_configuration.type + if pip.repo_configuration.type == "FactoryVSTSConfiguration": + result['repo_configuration']['project_name'] = pip.repo_configuration.project_name + return result + + +def main(): + AzureRMDataFactory() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory_info.py new file mode 100644 index 000000000..cdd63d717 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datafactory_info.py @@ -0,0 +1,300 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Fred-sun, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_datafactory_info +version_added: "0.1.12" +short_description: Get data factory facts +description: + - Get facts for a specific data factory. + +options: + name: + description: + - The factory name. + type: str + resource_group: + description: + - Limit results by resource group. Required when using name parameter. + type: str + if_none_match: + description: + - ETag of the factory entity. + - Should only be specified for get. + - If the ETag matches the existing entity tag, or if * was provided, then no content will be returned. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Fred-sun (@Fred-sun) + - xuzhang3 (@xuzhang3) +''' + +EXAMPLES = ''' + - name: Get data factory by name + azure_rm_datafactory_info: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + + - name: Get data factory by resource group + azure_rm_datafactory_info: + resource_group: "{{ resource_group }}" + + - name: Get data factory in relate subscription + azure_rm_datafactory_info: + tags: + - key1 +''' + +RETURN = ''' +datafactory: + description: + - Current state fo the data factory. + returned: always + type: complex + contains: + id: + description: + - The data facotry ID. + type: str + returned: always + sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.DataFactory/factories/testpro" + create_time: + description: + - Time the factory was created in ISO8601 format. + type: str + returned: always + sample: "2022-04-26T08:24:41.391164+00:00" + location: + description: + - The resource location. + type: str + returned: always + sample: eastus + name: + description: + - The resource name. + type: str + returned: always + sample: testfactory + provisioning_state: + description: + - Factory provisioning state, example Succeeded. + type: str + returned: always + sample: Succeeded + e_tag: + description: + - Etag identifies change in the resource. + type: str + returned: always + sample: "3000fa80-0000-0100-0000-6267ac490000" + type: + description: + - The resource type. + type: str + returned: always + sample: "Microsoft.DataFactory/factories" + public_network_access: + description: + - Whether or not public network access is allowed for the data factory. + type: str + returned: always + sample: "Enabled" + tags: + description: + - List the data factory tags. + type: str + returned: always + sample: {'key1':'value1'} + identity: + description: + - Managed service identity of the factory. 
+ type: str + returned: always + contains: + principal_id: + description: + - The principal id of the identity. + type: str + returned: always + sample: "***********" + tenant_id: + description: + - The client tenant id of the identity. + type: str + returned: always + sample: "***********" + repo_configuration: + description: + - Git repo information of the factory. + type: str + returned: always + contains: + type: + description: + - Type of repo configuration. + type: str + returned: always + sample: FactoryGitHubConfiguration + ccount_name: + description: + - Account name. + type: str + returned: always + sample: fredaccount + collaboration_branch: + description: + - Collaboration branch. + type: str + returned: always + sample: branch + repository_name: + description: + - Repository name. + type: str + returned: always + sample: "vault" + root_folder: + description: + - Root folder. + type: str + returned: always + sample: "/home/" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +AZURE_OBJECT_CLASS = 'DataFactoryInfo' + + +class AzureRMDataFactoryInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + if_none_match=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + self.if_none_match = None + self.tags = None + + super(AzureRMDataFactoryInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + result = [] + + if self.name and self.resource_group: + result = self.get_item() + elif self.resource_group: + 
result = self.list_by_resourcegroup() + else: + result = self.list_all() + + self.results['datafactory'] = self.format(result) + + return self.results + + def format(self, raw): + results = [] + for item in raw: + if self.has_tags(item.tags, self.tags): + results.append(self.pip_to_dict(item)) + return results + + def pip_to_dict(self, pip): + result = dict( + id=pip.id, + name=pip.name, + type=pip.type, + location=pip.location, + tags=pip.tags, + e_tag=pip.e_tag, + provisioning_state=pip.provisioning_state, + create_time=pip.create_time, + repo_configuration=dict(), + identity=dict(), + public_network_access=pip.public_network_access, + ) + if pip.identity: + result['identity']['principal_id'] = pip.identity.principal_id + result['identity']['tenant_id'] = pip.identity.tenant_id + if pip.repo_configuration: + result['repo_configuration']['type'] = pip.repo_configuration.type + result['repo_configuration']['account_name'] = pip.repo_configuration.account_name + result['repo_configuration']['repository_name'] = pip.repo_configuration.repository_name + result['repo_configuration']['collaboration_branch'] = pip.repo_configuration.collaboration_branch + result['repo_configuration']['root_folder'] = pip.repo_configuration.root_folder + if pip.repo_configuration.type == "FactoryVSTSConfiguration": + result['repo_configuration']['project_name'] = pip.repo_configuration.project_name + return result + + def get_item(self): + response = None + self.log('Get properties for {0}'.format(self.name)) + try: + response = self.datafactory_client.factories.get(self.resource_group, self.name, self.if_none_match) + except ResourceNotFoundError: + pass + return [response] if response else [] + + def list_by_resourcegroup(self): + self.log("Get GitHub Access Token Response") + try: + response = self.datafactory_client.factories.list_by_resource_group(self.resource_group) + except Exception: + pass + return response if response else [] + + def list_all(self): + self.log("Get GitHub 
Access Token Response") + try: + response = self.datafactory_client.factories.list() + except Exception: + pass + return response if response else [] + + +def main(): + AzureRMDataFactoryInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py new file mode 100644 index 000000000..edce24665 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py @@ -0,0 +1,809 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: azure_rm_datalakestore +version_added: "1.4.0" +short_description: Manage Azure data lake store +description: + - Create, update or delete a data lake store. +options: + default_group: + description: + - The default owner group for all new folders and files created in the Data Lake Store account. + type: str + encryption_config: + description: + - The Key Vault encryption configuration. + type: dict + suboptions: + type: + description: + - The type of encryption configuration being used. + choices: + - UserManaged + - ServiceManaged + required: true + type: str + key_vault_meta_info: + description: + - The Key Vault information for connecting to user managed encryption keys. + type: dict + suboptions: + key_vault_resource_id: + description: + - The resource identifier for the user managed Key Vault being used to encrypt. + type: str + required: true + encryption_key_name: + description: + - The name of the user managed encryption key. + type: str + required: true + encryption_key_version: + description: + - The version of the user managed encryption key. 
+ type: str + required: true + encryption_state: + description: + - The current state of encryption for this Data Lake Store account. + choices: + - Enabled + - Disabled + type: str + firewall_allow_azure_ips: + description: + - The current state of allowing or disallowing IPs originating within Azure through the firewall. + - If the firewall is disabled, this is not enforced. + choices: + - Enabled + - Disabled + type: str + firewall_rules: + description: + - The list of firewall rules associated with this Data Lake Store account. + type: list + elements: dict + suboptions: + name: + description: + - The unique name of the firewall rule to create. + type: str + required: true + start_ip_address: + description: + - The start IP address for the firewall rule. + - This can be either ipv4 or ipv6. + - Start and End should be in the same protocol. + type: str + required: true + end_ip_address: + description: + - The end IP address for the firewall rule. + - This can be either ipv4 or ipv6. + - Start and End should be in the same protocol. + type: str + required: true + firewall_state: + description: + - The current state of the IP address firewall for this Data Lake Store account. + choices: + - Enabled + - Disabled + type: str + identity: + description: + - The Key Vault encryption identity, if any. + choices: + - SystemAssigned + type: str + location: + description: + - The resource location. + type: str + name: + description: + - The name of the Data Lake Store account. + type: str + required: true + new_tier: + description: + - The commitment tier to use for next month. + choices: + - Consumption + - Commitment_1TB + - Commitment_10TB + - Commitment_100TB + - Commitment_500TB + - Commitment_1PB + - Commitment_5PB + type: str + resource_group: + description: + - The name of the Azure resource group to use. + required: true + type: str + aliases: + - resource_group_name + state: + description: + - State of the data lake store. 
Use C(present) to create or update a data lake store and use C(absent) to delete it. + default: present + choices: + - absent + - present + type: str + virtual_network_rules: + description: + - The list of virtual network rules associated with this Data Lake Store account. + type: list + elements: dict + suboptions: + name: + description: + - The unique name of the virtual network rule to create. + type: str + required: true + subnet_id: + description: + - The resource identifier for the subnet. + type: str + required: true + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - David Duque Hernández (@next-davidduquehernandez) +''' + +EXAMPLES = ''' + - name: Create Azure Data Lake Store + azure_rm_datalakestore: + resource_group: myResourceGroup + name: myDataLakeStore +''' + +RETURN = ''' +state: + description: + - Facts for Azure Data Lake Store created/updated. + returned: always + type: complex + contains: + account_id: + description: + - The unique identifier associated with this Data Lake Store account. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + creation_time: + description: + - The account creation time. + returned: always + type: str + sample: '2020-01-01T00:00:00.000000+00:00' + current_tier: + description: + - The commitment tier in use for the current month. + type: str + returned: always + sample: Consumption + default_group: + description: + - The default owner group for all new folders and files created in the Data Lake Store account. + type: str + sample: null + encryption_config: + description: + - The Key Vault encryption configuration. + type: complex + contains: + type: + description: + - The type of encryption configuration being used. + type: str + returned: always + sample: ServiceManaged + key_vault_meta_info: + description: + - The Key Vault information for connecting to user managed encryption keys. 
+ type: complex + contains: + key_vault_resource_id: + description: + - The resource identifier for the user managed Key Vault being used to encrypt. + type: str + returned: always + sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/tstkv + encryption_key_name: + description: + - The name of the user managed encryption key. + type: str + returned: always + sample: KeyName + encryption_key_version: + description: + - The version of the user managed encryption key. + type: str + returned: always + sample: 86a1e3b7406f45afa0d54e21eff47e39 + encryption_provisioning_state: + description: + - The current state of encryption provisioning for this Data Lake Store account. + type: str + sample: Succeeded + encryption_state: + description: + - The current state of encryption for this Data Lake Store account. + type: str + returned: always + sample: Enabled + endpoint: + description: + - The full CName endpoint for this account. + returned: always + type: str + sample: testaccount.azuredatalakestore.net + firewall_allow_azure_ips: + description: + - The current state of allowing or disallowing IPs originating within Azure through the firewall. + - If the firewall is disabled, this is not enforced. + type: str + returned: always + sample: Disabled + firewall_rules: + description: + - The list of firewall rules associated with this Data Lake Store account. + type: list + returned: always + contains: + name: + description: + - The resource name. + type: str + returned: always + sample: Example Name + start_ip_address: + description: + - The start IP address for the firewall rule. + - This can be either ipv4 or ipv6. + - Start and End should be in the same protocol. + type: str + returned: always + sample: 192.168.1.1 + end_ip_address: + description: + - The end IP address for the firewall rule. + - This can be either ipv4 or ipv6. + - Start and End should be in the same protocol. 
+ type: str + returned: always + sample: 192.168.1.254 + firewall_state: + description: + - The current state of the IP address firewall for this Data Lake Store account. + type: str + returned: always + sample: Enabled + id: + description: + - The resource identifier. + returned: always + type: str + sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.DataLakeStore/accounts/testaccount + identity: + description: + - The Key Vault encryption identity, if any. + type: complex + contains: + type: + description: + - The type of encryption being used. + type: str + sample: SystemAssigned + principal_id: + description: + - The principal identifier associated with the encryption. + type: str + sample: 00000000-0000-0000-0000-000000000000 + tenant_id: + description: + - The tenant identifier associated with the encryption. + type: str + sample: 00000000-0000-0000-0000-000000000000 + last_modified_time: + description: + - The account last modified time. + returned: always + type: str + sample: '2020-01-01T00:00:00.000000+00:00' + location: + description: + - The resource location. + returned: always + type: str + sample: westeurope + name: + description: + - The resource name. + returned: always + type: str + sample: testaccount + new_tier: + description: + - The commitment tier to use for next month. + type: str + returned: always + sample: Consumption + provisioning_state: + description: + - The provisioning status of the Data Lake Store account. + returned: always + type: str + sample: Succeeded + state: + description: + - The state of the Data Lake Store account. + returned: always + type: str + sample: Active + tags: + description: + - The resource tags. + returned: always + type: dict + sample: { "tag1":"abc" } + trusted_id_providers: + description: + - The current state of the trusted identity provider feature for this Data Lake Store account. 
+ type: list + returned: always + contains: + id: + description: + - The resource identifier. + type: str + name: + description: + - The resource name. + type: str + type: + description: + - The resource type. + type: str + id_provider: + description: + - The URL of this trusted identity provider. + type: str + trusted_id_provider_state: + description: + - The list of trusted identity providers associated with this Data Lake Store account. + type: str + returned: always + sample: Enabled + type: + description: + - The resource type. + returned: always + type: str + sample: Microsoft.DataLakeStore/accounts + virtual_network_rules: + description: + - The list of virtual network rules associated with this Data Lake Store account. + type: list + returned: always + contains: + name: + description: + - The resource name. + type: str + sample: Rule Name + subnet_id: + description: + - The resource identifier for the subnet. + type: str + sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +import datetime + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +firewall_rules_item = dict( + name=dict(type='str', required=True), + start_ip_address=dict(type='str', required=True), + end_ip_address=dict(type='str', required=True) +) + +virtual_network_rules_item = dict( + name=dict(type='str', required=True), + subnet_id=dict(type='str', required=True) +) + + +class AzureRMDatalakeStore(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + default_group=dict(type='str'), + encryption_config=dict( + type='dict', + options=dict( + type=dict(type='str', choices=['UserManaged', 'ServiceManaged'], required=True), + key_vault_meta_info=dict( + type='dict', + no_log=True, + 
options=dict( + key_vault_resource_id=dict(type='str', required=True), + encryption_key_name=dict(type='str', required=True), + encryption_key_version=dict(type='str', no_log=True, required=True) + ) + ), + ) + ), + encryption_state=dict(type='str', choices=['Enabled', 'Disabled']), + firewall_allow_azure_ips=dict(type='str', choices=['Enabled', 'Disabled']), + firewall_rules=dict( + type='list', + elements='dict', + options=firewall_rules_item + ), + firewall_state=dict(type='str', choices=['Enabled', 'Disabled']), + identity=dict(type='str', choices=['SystemAssigned']), + location=dict(type='str'), + name=dict(type='str', required=True), + new_tier=dict(type='str', choices=['Consumption', 'Commitment_1TB', 'Commitment_10TB', 'Commitment_100TB', + 'Commitment_500TB', 'Commitment_1PB', 'Commitment_5PB']), + resource_group=dict(type='str', required=True, aliases=['resource_group_name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + tags=dict(type='dict'), + virtual_network_rules=dict( + type='list', + elements='dict', + options=virtual_network_rules_item + ), + ) + + self.state = None + self.name = None + self.resource_group = None + self.location = None + self.tags = None + self.new_tier = None + self.default_group = None + self.encryption_config = dict() + self.encryption_config_model = None + self.encryption_state = None + self.firewall_state = None + self.firewall_allow_azure_ips = None + self.firewall_rules = None + self.firewall_rules_model = None + self.virtual_network_rules = None + self.virtual_network_rules_model = None + self.identity = None + self.identity_model = None + + self.results = dict(changed=False) + self.account_dict = None + + super(AzureRMDatalakeStore, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + if 
self.encryption_config: + key_vault_meta_info_model = None + if self.encryption_config.get('key_vault_meta_info'): + key_vault_meta_info_model = self.datalake_store_models.KeyVaultMetaInfo( + key_vault_resource_id=self.encryption_config.get('key_vault_meta_info').get('key_vault_resource_id'), + encryption_key_name=self.encryption_config.get('key_vault_meta_info').get('encryption_key_name'), + encryption_key_version=self.encryption_config.get('key_vault_meta_info').get('encryption_key_version') + ) + self.encryption_config_model = self.datalake_store_models.EncryptionConfig(type=self.encryption_config.get('type'), + key_vault_meta_info=key_vault_meta_info_model) + + if self.identity is not None: + self.identity_model = self.datalake_store_models.EncryptionIdentity( + type=self.identity + ) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + self.account_dict = self.get_datalake_store() + + if self.account_dict is not None: + self.results['state'] = self.account_dict + else: + self.results['state'] = dict() + + if self.state == 'present': + if not self.account_dict: + self.results['state'] = self.create_datalake_store() + else: + self.results['state'] = self.update_datalake_store() + else: + self.delete_datalake_store() + self.results['state'] = dict(state='Deleted') + + return self.results + + def check_name_availability(self): + self.log('Checking name availability for {0}'.format(self.name)) + try: + response = self.datalake_store_client.accounts.check_name_availability(self.location, parameters={'name': self.name}) + except Exception as e: + self.log('Error attempting to validate name.') + self.fail("Error checking name availability: {0}".format(str(e))) + if not response.name_available: + self.log('Error name not available.') + self.fail("{0} - {1}".format(response.message, response.reason)) + + def create_datalake_store(self): + self.log("Creating datalake store 
{0}".format(self.name)) + + if not self.location: + self.fail('Parameter error: location required when creating a datalake store account.') + + self.check_name_availability() + self.results['changed'] = True + + if self.check_mode: + account_dict = dict( + name=self.name, + resource_group=self.resource_group, + location=self.location + ) + return account_dict + + if self.firewall_rules is not None: + self.firewall_rules_model = list() + for rule in self.firewall_rules: + rule_model = self.datalake_store_models.CreateFirewallRuleWithAccountParameters( + name=rule.get('name'), + start_ip_address=rule.get('start_ip_address'), + end_ip_address=rule.get('end_ip_address')) + self.firewall_rules_model.append(rule_model) + + if self.virtual_network_rules is not None: + self.virtual_network_rules_model = list() + for vnet_rule in self.virtual_network_rules: + vnet_rule_model = self.datalake_store_models.CreateVirtualNetworkRuleWithAccountParameters( + name=vnet_rule.get('name'), + subnet_id=vnet_rule.get('subnet_id')) + self.virtual_network_rules_model.append(vnet_rule_model) + + parameters = self.datalake_store_models.CreateDataLakeStoreAccountParameters( + default_group=self.default_group, + encryption_config=self.encryption_config_model, + encryption_state=self.encryption_state, + firewall_allow_azure_ips=self.firewall_allow_azure_ips, + firewall_rules=self.firewall_rules_model, + firewall_state=self.firewall_state, + identity=self.identity_model, + location=self.location, + new_tier=self.new_tier, + tags=self.tags, + virtual_network_rules=self.virtual_network_rules_model + ) + + self.log(str(parameters)) + try: + poller = self.datalake_store_client.accounts.begin_create(self.resource_group, self.name, parameters) + self.get_poller_result(poller) + except Exception as e: + self.log('Error creating datalake store.') + self.fail("Failed to create datalake store: {0}".format(str(e))) + + return self.get_datalake_store() + + def update_datalake_store(self): + 
self.log("Updating datalake store {0}".format(self.name)) + + parameters = self.datalake_store_models.UpdateDataLakeStoreAccountParameters() + + if self.tags: + update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags']) + if update_tags: + self.results['changed'] = True + parameters.tags = self.account_dict['tags'] + + if self.new_tier and self.account_dict.get('new_tier') != self.new_tier: + self.results['changed'] = True + parameters.new_tier = self.new_tier + + if self.default_group and self.account_dict.get('default_group') != self.default_group: + self.results['changed'] = True + parameters.default_group = self.default_group + + if self.encryption_state and self.account_dict.get('encryption_state') != self.encryption_state: + self.fail("Encryption type cannot be updated.") + + if self.encryption_config: + if ( + self.encryption_config.get('type') == 'UserManaged' + and self.encryption_config.get('key_vault_meta_info') != self.account_dict.get('encryption_config').get('key_vault_meta_info') + ): + self.results['changed'] = True + key_vault_meta_info_model = self.datalake_store_models.UpdateKeyVaultMetaInfo( + encryption_key_version=self.encryption_config.get('key_vault_meta_info').get('encryption_key_version') + ) + encryption_config_model = self.datalake_store_models.UpdateEncryptionConfig = key_vault_meta_info_model + parameters.encryption_config = encryption_config_model + + if self.firewall_state and self.account_dict.get('firewall_state') != self.firewall_state: + self.results['changed'] = True + parameters.firewall_state = self.firewall_state + + if self.firewall_allow_azure_ips and self.account_dict.get('firewall_allow_azure_ips') != self.firewall_allow_azure_ips: + self.results['changed'] = True + parameters.firewall_allow_azure_ips = self.firewall_allow_azure_ips + + if self.firewall_rules is not None: + if not self.compare_lists(self.firewall_rules, self.account_dict.get('firewall_rules')): + self.firewall_rules_model = 
list() + for rule in self.firewall_rules: + rule_model = self.datalake_store_models.UpdateFirewallRuleWithAccountParameters( + name=rule.get('name'), + start_ip_address=rule.get('start_ip_address'), + end_ip_address=rule.get('end_ip_address')) + self.firewall_rules_model.append(rule_model) + self.results['changed'] = True + parameters.firewall_rules = self.firewall_rules_model + + if self.virtual_network_rules is not None: + if not self.compare_lists(self.virtual_network_rules, self.account_dict.get('virtual_network_rules')): + self.virtual_network_rules_model = list() + for vnet_rule in self.virtual_network_rules: + vnet_rule_model = self.datalake_store_models.UpdateVirtualNetworkRuleWithAccountParameters( + name=vnet_rule.get('name'), + subnet_id=vnet_rule.get('subnet_id')) + self.virtual_network_rules_model.append(vnet_rule_model) + self.results['changed'] = True + parameters.virtual_network_rules = self.virtual_network_rules_model + + if self.identity_model is not None: + self.results['changed'] = True + parameters.identity = self.identity_model + + self.log(str(parameters)) + if self.results['changed']: + try: + poller = self.datalake_store_client.accounts.begin_update(self.resource_group, self.name, parameters) + self.get_poller_result(poller) + except Exception as e: + self.log('Error creating datalake store.') + self.fail("Failed to create datalake store: {0}".format(str(e))) + + return self.get_datalake_store() + + def delete_datalake_store(self): + self.log('Delete datalake store {0}'.format(self.name)) + + self.results['changed'] = True if self.account_dict is not None else False + if not self.check_mode and self.account_dict is not None: + try: + status = self.datalake_store_client.accounts.begin_delete(self.resource_group, self.name) + self.log("delete status: ") + self.log(str(status)) + except Exception as e: + self.fail("Failed to delete datalake store: {0}".format(str(e))) + + return True + + def get_datalake_store(self): + self.log('Get properties 
def account_obj_to_dict(self, datalake_store_obj):
    """Serialize a Data Lake Store account SDK object into the module's result dict.

    Collections (firewall rules, vnet rules) always serialize to lists, empty
    when the SDK returns None; identity and encryption_config stay None when
    absent on the account.
    """
    src = datalake_store_obj

    account_dict = dict(
        account_id=src.account_id,
        creation_time=src.creation_time,
        current_tier=src.current_tier,
        default_group=src.default_group,
        encryption_config=None,
        encryption_provisioning_state=src.encryption_provisioning_state,
        encryption_state=src.encryption_state,
        endpoint=src.endpoint,
        firewall_allow_azure_ips=src.firewall_allow_azure_ips,
        # None from the SDK collapses to an empty list of rules.
        firewall_rules=[
            dict(
                name=rule.name,
                start_ip_address=rule.start_ip_address,
                end_ip_address=rule.end_ip_address,
            )
            for rule in (src.firewall_rules or [])
        ],
        firewall_state=src.firewall_state,
        id=src.id,
        identity=None,
        last_modified_time=src.last_modified_time,
        location=src.location,
        name=src.name,
        new_tier=src.new_tier,
        provisioning_state=src.provisioning_state,
        state=src.state,
        tags=src.tags,
        trusted_id_providers=src.trusted_id_providers,
        trusted_id_provider_state=src.trusted_id_provider_state,
        type=src.type,
        virtual_network_rules=[
            dict(name=vnet_rule.name, subnet_id=vnet_rule.subnet_id)
            for vnet_rule in (src.virtual_network_rules or [])
        ],
    )

    if src.identity:
        account_dict['identity'] = dict(
            type=src.identity.type,
            principal_id=src.identity.principal_id,
            tenant_id=src.identity.tenant_id,
        )

    # Only expose encryption_config when a Key Vault meta-info section exists.
    meta_info = src.encryption_config.key_vault_meta_info if src.encryption_config else None
    if meta_info:
        account_dict['encryption_config'] = dict(
            key_vault_meta_info=dict(
                key_vault_resource_id=meta_info.key_vault_resource_id,
                encryption_key_name=meta_info.encryption_key_name,
                encryption_key_version=meta_info.encryption_key_version,
            )
        )

    return account_dict


def compare_lists(self, list1, list2):
    """Order-insensitive comparison of two lists (membership-based)."""
    return len(list1) == len(list2) and all(element in list2 for element in list1)


def main():
    AzureRMDatalakeStore()


if __name__ == '__main__':
    main()
+ +options: + resource_group: + description: + - The name of the Azure resource group. + type: str + aliases: + - resource_group_name + name: + description: + - The name of the Data Lake Store account. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - David Duque Hernández (@next-davidduquehernandez) + +''' + +EXAMPLES = ''' + - name: Get Azure Data Lake Store info from resource group 'myResourceGroup' and name 'myDataLakeStore' + azure_rm_datalakestore_info: + resource_group: myResourceGroup + name: myDataLakeStore + + - name: Get Azure Data Lake Store info from resource group 'myResourceGroup' + azure_rm_datalakestore_info: + resource_group: myResourceGroup + + - name: Get Azure Data Lake Store info + azure_rm_datalakestore_info: +''' + +RETURN = ''' +datalake: + description: + - A list of dictionaries containing facts for Azure Data Lake Store. + returned: always + type: complex + contains: + account_id: + description: + - The unique identifier associated with this Data Lake Store account. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + creation_time: + description: + - The account creation time. + returned: always + type: str + sample: '2020-01-01T00:00:00.000000+00:00' + current_tier: + description: + - The commitment tier in use for the current month. + type: str + sample: Consumption + default_group: + description: + - The default owner group for all new folders and files created in the Data Lake Store account. + type: str + sample: null + encryption_config: + description: + - The Key Vault encryption configuration. + type: complex + contains: + type: + description: + - The type of encryption configuration being used. + type: str + returned: always + sample: ServiceManaged + key_vault_meta_info: + description: + - The Key Vault information for connecting to user managed encryption keys. 
+ type: complex + contains: + key_vault_resource_id: + description: + - The resource identifier for the user managed Key Vault being used to encrypt. + type: str + returned: always + sample: /subscriptions/{subscriptionId}/resourceGroups/myRG/providers/Microsoft.KeyVault/vaults/testkv + encryption_key_name: + description: + - The name of the user managed encryption key. + type: str + returned: always + sample: KeyName + encryption_key_version: + description: + - The version of the user managed encryption key. + type: str + returned: always + sample: 86a1e3b7406f45afa0d54e21eff47e39 + encryption_provisioning_state: + description: + - The current state of encryption provisioning for this Data Lake Store account. + type: str + sample: Succeeded + encryption_state: + description: + - The current state of encryption for this Data Lake Store account. + type: str + sample: Enabled + endpoint: + description: + - The full CName endpoint for this account. + returned: always + type: str + sample: testaccount.azuredatalakestore.net + firewall_allow_azure_ips: + description: + - The current state of allowing or disallowing IPs originating within Azure through the firewall. + type: str + sample: Disabled + firewall_rules: + description: + - The list of firewall rules associated with this Data Lake Store account. + type: list + contains: + name: + description: + - The resource name. + type: str + returned: always + sample: Example Name + start_ip_address: + description: + - The start IP address for the firewall rule. + - This can be either ipv4 or ipv6. + - Start and End should be in the same protocol. + type: str + returned: always + sample: 192.168.1.1 + end_ip_address: + description: + - The end IP address for the firewall rule. + - This can be either ipv4 or ipv6. + - Start and End should be in the same protocol. 
+ type: str + returned: always + sample: 192.168.1.254 + firewall_state: + description: + - The current state of the IP address firewall for this Data Lake Store account. + type: str + sample: Enabled + id: + description: + - The resource identifier. + returned: always + type: str + sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.DataLakeStore/accounts/testaccount + identity: + description: + - The Key Vault encryption identity, if any. + type: complex + contains: + type: + description: + - The type of encryption being used. + type: str + sample: SystemAssigned + principal_id: + description: + - The principal identifier associated with the encryption. + type: str + sample: 00000000-0000-0000-0000-000000000000 + tenant_id: + description: + - The tenant identifier associated with the encryption. + type: str + sample: 00000000-0000-0000-0000-000000000000 + last_modified_time: + description: + - The account last modified time. + returned: always + type: str + sample: '2020-01-01T00:00:00.000000+00:00' + location: + description: + - The resource location. + returned: always + type: str + sample: westeurope + name: + description: + - The resource name. + returned: always + type: str + sample: testaccount + new_tier: + description: + - The commitment tier to use for next month. + type: str + sample: Consumption + provisioning_state: + description: + - The provisioning status of the Data Lake Store account. + returned: always + type: str + sample: Succeeded + state: + description: + - The state of the Data Lake Store account. + returned: always + type: str + sample: Active + tags: + description: + - The resource tags. + returned: always + type: dict + sample: { "tag1":"abc" } + trusted_id_providers: + description: + - The list of trusted identity providers associated with this Data Lake Store account. + type: list + contains: + id: + description: + - The resource identifier.
class AzureRMDatalakeStoreInfo(AzureRMModuleBase):
    """Info module: gather facts about Azure Data Lake Store accounts.

    Lookup modes, from narrowest to widest:
      - name + resource_group: one account with full detail,
      - resource_group alone: basic facts for every account in the group,
      - no filter: basic facts for every account in the subscription.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str', aliases=['resource_group_name'])
        )

        self.results = dict(
            changed=False,
            datalake=[]
        )

        self.name = None
        self.resource_group = None

        super(AzureRMDatalakeStoreInfo, self).__init__(self.module_arg_spec,
                                                       supports_check_mode=True,
                                                       supports_tags=False)

    def exec_module(self, **kwargs):
        """Validate the filters and collect the matching accounts."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # Account names are only unique within a resource group, so a bare
        # name cannot be resolved on its own.
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            results = self.get_datalake_store()
        elif self.resource_group:
            results = self.list_resource_group()
        else:
            results = self.list_all()

        self.results['datalake'] = results
        return self.results

    def get_datalake_store(self):
        """Return the named account as a one-element list ([] when absent)."""
        self.log('Get properties for datalake store {0}'.format(self.name))
        datalake_store_obj = None

        try:
            datalake_store_obj = self.datalake_store_client.accounts.get(self.resource_group, self.name)
        except ResourceNotFoundError:
            # A missing account is not an error for an info module.
            pass

        if datalake_store_obj:
            return [self.account_obj_to_dict(datalake_store_obj)]

        return list()

    def list_resource_group(self):
        """Return basic facts for every account in the resource group."""
        self.log('Get basic properties for datalake store in resource group {0}'.format(self.resource_group))
        datalake_store_obj = None
        results = list()

        try:
            datalake_store_obj = self.datalake_store_client.accounts.list_by_resource_group(self.resource_group)
        except Exception:
            # Deliberate best effort: an unlistable group yields no results.
            pass

        if datalake_store_obj:
            for datalake_item in datalake_store_obj:
                results.append(self.account_obj_to_dict_basic(datalake_item))
            return results

        return list()

    def list_all(self):
        """Return basic facts for every account in the subscription."""
        self.log('Get basic properties for all datalake store')
        datalake_store_obj = None
        results = list()

        try:
            datalake_store_obj = self.datalake_store_client.accounts.list()
        except Exception:
            # Deliberate best effort: no visible accounts yields no results.
            pass

        if datalake_store_obj:
            for datalake_item in datalake_store_obj:
                results.append(self.account_obj_to_dict_basic(datalake_item))
            return results

        return list()

    def account_obj_to_dict(self, datalake_store_obj):
        """Serialize a full account object (detailed, single-name lookup).

        BUGFIX: the optional SDK attributes (firewall_rules,
        virtual_network_rules, encryption_config) may be None; the previous
        code iterated/dereferenced them unguarded and crashed, while the
        sibling azure_rm_datalakestore module already guards all of them.
        BUGFIX: the documented encryption_config.type key is now kept even
        when key_vault_meta_info is populated (it was silently dropped).
        """
        account_dict = dict(
            account_id=datalake_store_obj.account_id,
            creation_time=datalake_store_obj.creation_time,
            current_tier=datalake_store_obj.current_tier,
            default_group=datalake_store_obj.default_group,
            encryption_config=None,
            encryption_provisioning_state=datalake_store_obj.encryption_provisioning_state,
            encryption_state=datalake_store_obj.encryption_state,
            endpoint=datalake_store_obj.endpoint,
            firewall_allow_azure_ips=datalake_store_obj.firewall_allow_azure_ips,
            firewall_rules=None,
            firewall_state=datalake_store_obj.firewall_state,
            id=datalake_store_obj.id,
            identity=None,
            last_modified_time=datalake_store_obj.last_modified_time,
            location=datalake_store_obj.location,
            name=datalake_store_obj.name,
            new_tier=datalake_store_obj.new_tier,
            provisioning_state=datalake_store_obj.provisioning_state,
            state=datalake_store_obj.state,
            tags=datalake_store_obj.tags,
            trusted_id_providers=datalake_store_obj.trusted_id_providers,
            trusted_id_provider_state=datalake_store_obj.trusted_id_provider_state,
            type=datalake_store_obj.type,
            virtual_network_rules=None
        )

        account_dict['firewall_rules'] = list()
        if datalake_store_obj.firewall_rules:
            for rule in datalake_store_obj.firewall_rules:
                rule_item = dict(
                    name=rule.name,
                    start_ip_address=rule.start_ip_address,
                    end_ip_address=rule.end_ip_address
                )
                account_dict['firewall_rules'].append(rule_item)

        account_dict['virtual_network_rules'] = list()
        if datalake_store_obj.virtual_network_rules:
            for vnet_rule in datalake_store_obj.virtual_network_rules:
                vnet_rule_item = dict(
                    name=vnet_rule.name,
                    subnet_id=vnet_rule.subnet_id
                )
                account_dict['virtual_network_rules'].append(vnet_rule_item)

        if datalake_store_obj.identity:
            account_dict['identity'] = dict(
                type=datalake_store_obj.identity.type,
                principal_id=datalake_store_obj.identity.principal_id,
                tenant_id=datalake_store_obj.identity.tenant_id
            )

        if datalake_store_obj.encryption_config:
            account_dict['encryption_config'] = dict(
                type=datalake_store_obj.encryption_config.type,
                key_vault_meta_info=None
            )
            meta_info = datalake_store_obj.encryption_config.key_vault_meta_info
            if meta_info:
                account_dict['encryption_config']['key_vault_meta_info'] = dict(
                    key_vault_resource_id=meta_info.key_vault_resource_id,
                    encryption_key_name=meta_info.encryption_key_name,
                    encryption_key_version=meta_info.encryption_key_version
                )

        return account_dict

    def account_obj_to_dict_basic(self, datalake_store_obj):
        """Serialize only the summary fields returned by the list operations."""
        account_dict = dict(
            account_id=datalake_store_obj.account_id,
            creation_time=datalake_store_obj.creation_time,
            endpoint=datalake_store_obj.endpoint,
            id=datalake_store_obj.id,
            last_modified_time=datalake_store_obj.last_modified_time,
            location=datalake_store_obj.location,
            name=datalake_store_obj.name,
            provisioning_state=datalake_store_obj.provisioning_state,
            state=datalake_store_obj.state,
            tags=datalake_store_obj.tags,
            type=datalake_store_obj.type
        )

        return account_dict


def main():
    AzureRMDatalakeStoreInfo()


if __name__ == '__main__':
    main()
class AzureDDoSProtectionPlan(AzureRMModuleBase):
    """Create, update or delete an Azure DDoS protection plan."""

    def __init__(self):
        # Playbook-facing argument specification.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            location=dict(type='str'),
            state=dict(choices=['present', 'absent'],
                       default='present', type='str'),
        )

        self.resource_group = None
        self.name = None
        self.location = None
        self.state = None
        self.tags = None
        self.log_path = None
        self.results = dict(
            changed=False,
            state=dict()
        )

        super(AzureDDoSProtectionPlan, self).__init__(self.module_arg_spec,
                                                      supports_check_mode=True,
                                                      supports_tags=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested state against the plan currently in Azure."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        # The resource group must already exist; this fails fast if not.
        self.get_resource_group(self.resource_group)

        results = dict()
        changed = False

        try:
            self.log('Fetching DDoS protection plan {0}'.format(self.name))
            existing_plan = self.network_client.ddos_protection_plans.get(
                self.resource_group, self.name)
            results = ddos_protection_plan_to_dict(existing_plan)

            if self.state == 'present':
                # Plan already exists: only tag drift can trigger a change.
                update_tags, results['tags'] = self.update_tags(results['tags'])
                changed = bool(update_tags)
            elif self.state == 'absent':
                # Exists but requested absent: must delete.
                changed = True
        except ResourceNotFoundError:
            # Not found: create for 'present', nothing to delete for 'absent'.
            changed = self.state == 'present'

        self.results['changed'] = changed
        self.results['state'] = results

        # Check mode stops here and only reports what would happen.
        if self.check_mode:
            return self.results

        if changed:
            if self.state == "present":
                self.results['state'] = self.create_or_update_ddos_protection_plan(
                    self.module.params)
            elif self.state == "absent":
                self.delete_ddos_protection_plan()
                self.results['state']['status'] = 'Deleted'

        return self.results
def create_or_update_ddos_protection_plan(self, params):
    '''
    Create or update DDoS protection plan.
    :return: create or update DDoS protection plan instance state dictionary
    '''
    self.log("create or update DDoS protection plan {0}".format(self.name))
    try:
        lro_poller = self.network_client.ddos_protection_plans.begin_create_or_update(
            resource_group_name=params.get("resource_group"),
            ddos_protection_plan_name=params.get("name"),
            parameters=params)
        response = self.get_poller_result(lro_poller)
        self.log("Response : {0}".format(response))
    except Exception as ex:
        self.fail("Failed to create DDoS protection plan {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
    return ddos_protection_plan_to_dict(response)


def delete_ddos_protection_plan(self):
    '''
    Deletes specified DDoS protection plan
    :return True
    '''
    self.log("Deleting the DDoS protection plan {0}".format(self.name))
    try:
        lro_poller = self.network_client.ddos_protection_plans.begin_delete(
            self.resource_group, self.name)
        outcome = self.get_poller_result(lro_poller)
    except ResourceNotFoundError as e:
        self.log('Error attempting to delete DDoS protection plan.')
        self.fail(
            "Error deleting the DDoS protection plan : {0}".format(str(e)))
    return outcome


def ddos_protection_plan_to_dict(item):
    """Flatten a DDoS protection plan SDK object into the module result dict."""
    raw = item.as_dict()
    # Missing keys deliberately collapse to None so the shape is stable.
    return {key: raw.get(key) for key in (
        'additional_properties',
        'id',
        'name',
        'type',
        'location',
        'tags',
        'etag',
        'resource_guid',
        'provisioning_state',
        'virtual_networks',
    )}


def main():
    AzureDDoSProtectionPlan()
+if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ddosprotectionplan_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ddosprotectionplan_info.py new file mode 100644 index 000000000..70b37ac73 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ddosprotectionplan_info.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Praveen Ghuge (@praveenghuge), Karl Dasan (@ikarldasan) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_ddosprotectionplan_info +version_added: "1.7.0" +short_description: Get Azure DDoS protection plan +description: + - Get facts of Azure DDoS protection plan. +options: + resource_group: + description: + - The name of the resource group. + type: str + name: + description: + - The name of the DDoS protection plan. 
class AzureDDoSProtectionPlanInfo(AzureRMModuleBase):
    """Info module: gather facts about Azure DDoS protection plans."""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False)
        self.resource_group = None
        self.name = None
        self.tags = None

        super(AzureDDoSProtectionPlanInfo, self).__init__(
            self.module_arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Pick the narrowest listing the supplied filters allow."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name is not None:
            # a name was given -> look up that single plan
            results = self.get()
        elif self.resource_group:
            # all the DDoS protection plan listed in that specific resource group
            results = self.list_resource_group()
        else:
            # all the DDoS protection plan listed in the subscription
            results = self.list_subscription()

        self.results['ddosprotectionplan'] = [
            self.ddos_protection_plan_to_dict(x) for x in results]
        return self.results

    def get(self):
        """Look up one plan by name; an absent plan yields an empty list.

        BUGFIX: previously a ResourceNotFoundError made the module fail.
        An info module should report "no results" for a missing resource,
        matching the other *_info modules in this collection.
        """
        response = None
        results = []
        try:
            response = self.network_client.ddos_protection_plans.get(
                self.resource_group, self.name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError:
            self.log('DDoS protection plan {0} was not found.'.format(self.name))

        if response and self.has_tags(response.tags, self.tags):
            results = [response]
        return results

    def list_resource_group(self):
        """List every plan in the resource group, filtered by tags."""
        self.log('List items for resource group')
        try:
            response = self.network_client.ddos_protection_plans.list_by_resource_group(
                self.resource_group)

        except ResourceNotFoundError as exc:
            self.fail(
                "Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item)
        return results

    def list_subscription(self):
        """List every plan in the subscription, filtered by tags."""
        self.log('List items for subscription')
        try:
            response = self.network_client.ddos_protection_plans.list()

        except ResourceNotFoundError as exc:
            self.fail(
                "Failed to list DDoS protection plan in the subscription - {0}".format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item)
        return results

    def ddos_protection_plan_to_dict(self, item):
        """Serialize a plan object into the documented flat result dict."""
        # turn DDoS protection plan object into a dictionary (serialization)
        ddos_protection_plan = item.as_dict()

        result = dict(
            additional_properties=ddos_protection_plan.get('additional_properties', None),
            id=ddos_protection_plan.get('id', None),
            name=ddos_protection_plan.get('name', None),
            type=ddos_protection_plan.get('type', None),
            location=ddos_protection_plan.get('location', None),
            tags=ddos_protection_plan.get('tags', None),
            etag=ddos_protection_plan.get('etag', None),
            resource_guid=ddos_protection_plan.get('resource_guid', None),
            provisioning_state=ddos_protection_plan.get('provisioning_state', None),
            virtual_networks=ddos_protection_plan.get('virtual_networks', None)
        )
        return result


def main():
    AzureDDoSProtectionPlanInfo()


if __name__ == '__main__':
    main()
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment.py new file mode 100644 index 000000000..2b6a23a52 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment.py @@ -0,0 +1,715 @@ +#!/usr/bin/python +# +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_deployment + +version_added: "0.1.0" + +short_description: Create or destroy Azure Resource Manager template deployments + +description: + - Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python. + - You can find some quick start templates in GitHub here U(https://github.com/azure/azure-quickstart-templates). + - For more information on Azure Resource Manager templates see U(https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/). + +options: + resource_group: + description: + - The resource group name to use or create to host the deployed template. + required: true + aliases: + - resource_group_name + name: + description: + - The name of the deployment to be tracked in the resource group deployment history. + - Re-using a deployment name will overwrite the previous value in the resource group's deployment history. + default: ansible-arm + aliases: + - deployment_name + location: + description: + - The geo-locations in which the resource group will be located. + default: westus + deployment_mode: + description: + - In incremental mode, resources are deployed without deleting existing resources that are not included in the template. + - In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted. 
+ default: incremental + choices: + - complete + - incremental + template: + description: + - A hash containing the templates inline. This parameter is mutually exclusive with I(template_link). + - Either I(template) or I(template_link) is required if I(state=present). + type: dict + template_link: + description: + - Uri of file containing the template body. This parameter is mutually exclusive with I(template). + - Either I(template) or I(template_link) is required if I(state=present). + parameters: + description: + - A hash of all the required template variables for the deployment template. This parameter is mutually exclusive with I(parameters_link). + - Either I(parameters_link) or I(parameters) is required if I(state=present). + type: dict + parameters_link: + description: + - Uri of file containing the parameters body. This parameter is mutually exclusive with I(parameters). + - Either I(parameters_link) or I(parameters) is required if I(state=present). + wait_for_deployment_completion: + description: + - Whether or not to block until the deployment has completed. + type: bool + default: 'yes' + wait_for_deployment_polling_period: + description: + - Time (in seconds) to wait between polls when waiting for deployment completion. + default: 10 + state: + description: + - If I(state=present), template will be created. + - If I(state=present) and deployment exists, it will be updated. + - If I(state=absent), the resource group will be removed. 
+ default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - David Justice (@devigned) + - Laurent Mazuel (@lmazuel) + - Andre Price (@obsoleted) + +''' + +EXAMPLES = ''' +# Destroy a template deployment +- name: Destroy Azure Deploy + azure_rm_deployment: + resource_group: myResourceGroup + name: myDeployment + state: absent + +# Create or update a template deployment based on uris using parameter and template links +- name: Create Azure Deploy + azure_rm_deployment: + resource_group: myResourceGroup + name: myDeployment + template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json' + parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json' + +# Create or update a template deployment based on a uri to the template and parameters specified inline. +# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then +# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH. 
+ +- name: Create Azure Deploy + azure_rm_deployment: + resource_group: myResourceGroup + name: myDeployment + parameters: + newStorageAccountName: + value: devopsclestorage1 + adminUsername: + value: devopscle + dnsNameForPublicIP: + value: devopscleazure + location: + value: West US + vmSize: + value: Standard_A2 + vmName: + value: ansibleSshVm + sshKeyData: + value: YOUR_SSH_PUBLIC_KEY + template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json' + register: azure +- name: Add new instance to host group + add_host: + hostname: "{{ item['ips'][0].public_ip }}" + groupname: azure_vms + loop: "{{ azure.deployment.instances }}" + +# Deploy an Azure WebApp running a hello world'ish node app +- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js + azure_rm_deployment: + resource_group: myResourceGroup + name: myDeployment + parameters: + repoURL: + value: 'https://github.com/devigned/az-roadshow-oss.git' + siteName: + value: devopscleweb + hostingPlanName: + value: someplan + siteLocation: + value: westus + sku: + value: Standard + template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json' + +# Create or update a template deployment based on an inline template and parameters +- name: Create Azure Deploy + azure_rm_deployment: + resource_group: myResourceGroup + name: myDeployment + template: + $schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" + contentVersion: "1.0.0.0" + parameters: + newStorageAccountName: + type: "string" + metadata: + description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed." + adminUsername: + type: "string" + metadata: + description: "User name for the Virtual Machine." + adminPassword: + type: "securestring" + metadata: + description: "Password for the Virtual Machine." 
+ dnsNameForPublicIP: + type: "string" + metadata: + description: "Unique DNS Name for the Public IP used to access the Virtual Machine." + ubuntuOSVersion: + type: "string" + defaultValue: "14.04.2-LTS" + allowedValues: + - "12.04.5-LTS" + - "14.04.2-LTS" + - "15.04" + metadata: + description: > + The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. + Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04. + variables: + location: "West US" + imagePublisher: "Canonical" + imageOffer: "UbuntuServer" + OSDiskName: "osdiskforlinuxsimple" + nicName: "myVMNic" + addressPrefix: "192.0.2.0/24" + subnetName: "Subnet" + subnetPrefix: "192.0.2.0/24" + storageAccountType: "Standard_LRS" + publicIPAddressName: "myPublicIP" + publicIPAddressType: "Dynamic" + vmStorageAccountContainerName: "vhds" + vmName: "MyUbuntuVM" + vmSize: "Standard_D1" + virtualNetworkName: "MyVNET" + vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]" + subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]" + resources: + - type: "Microsoft.Storage/storageAccounts" + name: "[parameters('newStorageAccountName')]" + apiVersion: "2015-05-01-preview" + location: "[variables('location')]" + properties: + accountType: "[variables('storageAccountType')]" + - apiVersion: "2015-05-01-preview" + type: "Microsoft.Network/publicIPAddresses" + name: "[variables('publicIPAddressName')]" + location: "[variables('location')]" + properties: + publicIPAllocationMethod: "[variables('publicIPAddressType')]" + dnsSettings: + domainNameLabel: "[parameters('dnsNameForPublicIP')]" + - type: "Microsoft.Network/virtualNetworks" + apiVersion: "2015-05-01-preview" + name: "[variables('virtualNetworkName')]" + location: "[variables('location')]" + properties: + addressSpace: + addressPrefixes: + - "[variables('addressPrefix')]" + subnets: + - + name: "[variables('subnetName')]" + properties: + addressPrefix: 
"[variables('subnetPrefix')]" + - type: "Microsoft.Network/networkInterfaces" + apiVersion: "2015-05-01-preview" + name: "[variables('nicName')]" + location: "[variables('location')]" + dependsOn: + - "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" + - "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]" + properties: + ipConfigurations: + - + name: "ipconfig1" + properties: + privateIPAllocationMethod: "Dynamic" + publicIPAddress: + id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]" + subnet: + id: "[variables('subnetRef')]" + - type: "Microsoft.Compute/virtualMachines" + apiVersion: "2015-06-15" + name: "[variables('vmName')]" + location: "[variables('location')]" + dependsOn: + - "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]" + - "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]" + properties: + hardwareProfile: + vmSize: "[variables('vmSize')]" + osProfile: + computername: "[variables('vmName')]" + adminUsername: "[parameters('adminUsername')]" + adminPassword: "[parameters('adminPassword')]" + storageProfile: + imageReference: + publisher: "[variables('imagePublisher')]" + offer: "[variables('imageOffer')]" + sku: "[parameters('ubuntuOSVersion')]" + version: "latest" + osDisk: + name: "osdisk" + vhd: + uri: > + [concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/', + variables('OSDiskName'),'.vhd')] + caching: "ReadWrite" + createOption: "FromImage" + networkProfile: + networkInterfaces: + - + id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]" + diagnosticsProfile: + bootDiagnostics: + enabled: "true" + storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]" + parameters: + newStorageAccountName: + value: devopsclestorage + adminUsername: + value: devopscle + 
adminPassword: + value: Password1! + dnsNameForPublicIP: + value: devopscleazure +''' + +RETURN = ''' +deployment: + description: Deployment details. + type: complex + returned: always + contains: + group_name: + description: + - Name of the resource group. + type: str + returned: always + sample: myResourceGroup + id: + description: + - The Azure ID of the deployment. + type: str + returned: always + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Resources/deployments/myD + eployment" + instances: + description: + - Provides the public IP addresses for each VM instance. + type: list + returned: always + contains: + ips: + description: + - List of Public IP addresses. + type: list + returned: always + contains: + dns_settings: + description: + - DNS Settings. + type: complex + returned: always + contains: + domain_name_label: + description: + - Domain Name Label. + type: str + returned: always + sample: myvirtualmachine + fqdn: + description: + - Fully Qualified Domain Name. + type: str + returned: always + sample: myvirtualmachine.eastus2.cloudapp.azure.com + id: + description: + - Public IP resource id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/p + ublicIPAddresses/myPublicIP" + name: + description: + - Public IP resource name. + returned: always + type: str + sample: myPublicIP + public_ip: + description: + - Public IP address value. + returned: always + type: str + sample: 104.209.244.123 + public_ip_allocation_method: + description: + - Public IP allocation method. + returned: always + type: str + sample: Dynamic + vm_name: + description: + - Virtual machine name. + returned: always + type: str + sample: myvirtualmachine + name: + description: + - Name of the deployment. 
+ type: str + returned: always + sample: myDeployment + outputs: + description: + - Dictionary of outputs received from the deployment. + type: complex + returned: always + sample: { "hostname": { "type": "String", "value": "myvirtualmachine.eastus2.cloudapp.azure.com" } } +''' + +import time + +try: + from azure.common.credentials import ServicePrincipalCredentials + import time + import yaml +except ImportError as exc: + IMPORT_ERROR = "Error importing module prerequisites: %s" % exc + +try: + from itertools import chain + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.network import NetworkManagementClient + +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMDeploymentManager(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True, aliases=['resource_group_name']), + name=dict(type='str', default="ansible-arm", aliases=['deployment_name']), + state=dict(type='str', default='present', choices=['present', 'absent']), + template=dict(type='dict', default=None), + parameters=dict(type='dict', default=None), + template_link=dict(type='str', default=None), + parameters_link=dict(type='str', default=None), + location=dict(type='str', default="westus"), + deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']), + wait_for_deployment_completion=dict(type='bool', default=True), + wait_for_deployment_polling_period=dict(type='int', default=10) + ) + + mutually_exclusive = [('template', 'template_link'), + ('parameters', 'parameters_link')] + + self.resource_group = None + self.state = None + self.template = None + self.parameters = None + self.template_link = None + self.parameters_link = None + self.location = None + 
self.deployment_mode = None + self.name = None + self.wait_for_deployment_completion = None + self.wait_for_deployment_polling_period = None + self.tags = None + self.append_tags = None + + self.results = dict( + deployment=dict(), + changed=False, + msg="" + ) + + super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=False) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['append_tags', 'tags']: + setattr(self, key, kwargs[key]) + + if self.state == 'present': + deployment = self.deploy_template() + if deployment is None: + self.results['deployment'] = dict( + name=self.name, + group_name=self.resource_group, + id=None, + outputs=None, + instances=None + ) + else: + self.results['deployment'] = dict( + name=deployment.name, + group_name=self.resource_group, + id=deployment.id, + outputs=deployment.properties.outputs, + instances=self._get_instances(deployment) + ) + + self.results['changed'] = True + self.results['msg'] = 'deployment succeeded' + else: + try: + if self.get_resource_group(self.resource_group): + self.destroy_resource_group() + self.results['changed'] = True + self.results['msg'] = "deployment deleted" + except Exception: + # resource group does not exist + pass + + return self.results + + def deploy_template(self): + """ + Deploy the targeted template and parameters + :param module: Ansible module containing the validated configuration for the deployment template + :param client: resource management client for azure + :param conn_info: connection info needed + :return: + """ + + deploy_parameter = self.rm_models.DeploymentProperties(mode=self.deployment_mode) + if not self.parameters_link: + deploy_parameter.parameters = self.parameters + else: + deploy_parameter.parameters_link = self.rm_models.ParametersLink( + uri=self.parameters_link + ) + if not self.template_link: + deploy_parameter.template = self.template + 
else: + deploy_parameter.template_link = self.rm_models.TemplateLink( + uri=self.template_link + ) + + try: + # fetch the RG directly (instead of using the base helper) since we don't want to exit if it's missing + rg = self.rm_client.resource_groups.get(self.resource_group) + if rg.tags: + update_tags, self.tags = self.update_tags(rg.tags) + except ResourceNotFoundError: + # resource group does not exist + pass + + params = self.rm_models.ResourceGroup(location=self.location, tags=self.tags) + + try: + self.rm_client.resource_groups.create_or_update(self.resource_group, params) + except Exception as exc: + self.fail("Resource group create_or_update failed with status code: %s and message: %s" % + (exc.status_code, exc.message)) + try: + result = self.rm_client.deployments.begin_create_or_update(self.resource_group, + self.name, + {'properties': deploy_parameter}) + + deployment_result = None + if self.wait_for_deployment_completion: + deployment_result = self.get_poller_result(result) + while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted', + 'Succeeded']: + time.sleep(self.wait_for_deployment_polling_period) + deployment_result = self.rm_client.deployments.get(self.resource_group, self.name) + except Exception as exc: + failed_deployment_operations = self._get_failed_deployment_operations(self.name) + self.log("Deployment failed %s: %s" % (exc.status_code, exc.message)) + error_msg = self._error_msg_from_cloud_error(exc) + self.fail(error_msg, failed_deployment_operations=failed_deployment_operations) + + if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded': + self.log("provisioning state: %s" % deployment_result.properties.provisioning_state) + failed_deployment_operations = self._get_failed_deployment_operations(self.name) + self.fail('Deployment failed. 
Deployment id: %s' % deployment_result.id, + failed_deployment_operations=failed_deployment_operations) + + return deployment_result + + def destroy_resource_group(self): + """ + Destroy the targeted resource group + """ + try: + result = self.rm_client.resource_groups.begin_delete(self.resource_group) + result.wait() # Blocking wait till the delete is finished + except Exception as e: + if e.status_code == 404 or e.status_code == 204: + return + else: + self.fail("Delete resource group and deploy failed with status code: %s and message: %s" % + (e.status_code, e.message)) + + def _get_failed_nested_operations(self, current_operations): + new_operations = [] + for operation in current_operations: + if operation.properties.provisioning_state == 'Failed': + new_operations.append(operation) + if operation.properties.target_resource and \ + 'Microsoft.Resources/deployments' in operation.properties.target_resource.id: + nested_deployment = operation.properties.target_resource.resource_name + try: + nested_operations = self.rm_client.deployment_operations.list(self.resource_group, + nested_deployment) + except Exception as exc: + self.fail("List nested deployment operations failed with status code: %s and message: %s" % + (exc.status_code, exc.message)) + new_nested_operations = self._get_failed_nested_operations(nested_operations) + new_operations += new_nested_operations + return new_operations + + def _get_failed_deployment_operations(self, name): + results = [] + # time.sleep(15) # there is a race condition between when we ask for deployment status and when the + # # status is available. 
+ + try: + operations = self.rm_client.deployment_operations.list(self.resource_group, name) + except Exception as exc: + self.fail("Get deployment failed with status code: %s and message: %s" % + (exc.status_code, exc.message)) + try: + results = [ + dict( + id=op.id, + operation_id=op.operation_id, + status_code=op.properties.status_code, + status_message=op.properties.status_message, + target_resource=dict( + id=op.properties.target_resource.id, + resource_name=op.properties.target_resource.resource_name, + resource_type=op.properties.target_resource.resource_type + ) if op.properties.target_resource else None, + provisioning_state=op.properties.provisioning_state, + ) + for op in self._get_failed_nested_operations(operations) + ] + except Exception: + # If we fail here, the original error gets lost and user receives wrong error message/stacktrace + pass + self.log(dict(failed_deployment_operations=results), pretty_print=True) + return results + + def _get_instances(self, deployment): + dep_tree = self._build_hierarchy(deployment.properties.dependencies) + vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines") + vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces")) + for vm in vms] + vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics)) + for vm, nics in vms_and_nics] + return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip) + for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0] + + def _get_dependencies(self, dep_tree, resource_type): + matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type] + for child_tree in [value['children'] for value in dep_tree.values()]: + matches += self._get_dependencies(child_tree, resource_type) + return matches + + def _build_hierarchy(self, dependencies, tree=None): + tree = dict(top=True) if tree is None else tree + for dep in dependencies: + if dep.resource_name not in tree: + 
tree[dep.resource_name] = dict(dep=dep, children=dict()) + if isinstance(dep, self.rm_models.Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0: + self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children']) + + if 'top' in tree: + tree.pop('top', None) + keys = list(tree.keys()) + for key1 in keys: + for key2 in keys: + if key2 in tree and key1 in tree[key2]['children'] and key1 in tree: + tree[key2]['children'][key1] = tree[key1] + tree.pop(key1) + return tree + + def _get_ip_dict(self, ip): + ip_dict = dict(name=ip.name, + id=ip.id, + public_ip=ip.ip_address, + public_ip_allocation_method=str(ip.public_ip_allocation_method) + ) + if ip.dns_settings: + ip_dict['dns_settings'] = { + 'domain_name_label': ip.dns_settings.domain_name_label, + 'fqdn': ip.dns_settings.fqdn + } + return ip_dict + + def _nic_to_public_ips_instance(self, nics): + nic_list = [] + for nic in nics: + resp = None + try: + resp = self.network_client.network_interfaces.get(self.resource_group, nic['dep'].resource_name) + except ResourceNotFoundError: + pass + if resp is not None: + nic_list.append(resp) + + return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1]) + for nic_obj in nic_list + for public_ip_id in [ip_conf_instance.public_ip_address.id + for ip_conf_instance in nic_obj.ip_configurations + if ip_conf_instance.public_ip_address]] + + def _error_msg_from_cloud_error(self, exc): + msg = '' + status_code = str(exc.status_code) + if status_code.startswith('2'): + msg = 'Deployment failed: {0}'.format(exc.message) + else: + msg = 'Deployment failed with status code: {0} and message: {1}'.format(status_code, exc.message) + return msg + + +def main(): + AzureRMDeploymentManager() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment_info.py new file 
mode 100644 index 000000000..95dd56195 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_deployment_info.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_deployment_info +version_added: "0.1.2" +short_description: Get Azure Deployment facts +description: + - Get facts of Azure Deployment. + +options: + resource_group: + description: + - The name of the resource group. + required: True + name: + description: + - The name of the deployment. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Deployment + azure_rm_deployment_info: + resource_group: myResourceGroup + name: myDeployment +''' + +RETURN = ''' +deployments: + description: + - A list of dictionaries containing facts for deployments. + returned: always + type: complex + contains: + id: + description: + - The identifier of the resource. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Resources/deployments/myDeployment" + resource_group: + description: + - Resource group name. + returned: always + sample: myResourceGroup + name: + description: + - Deployment name. + returned: always + sample: myDeployment + provisioning_state: + description: + - Provisioning state of the deployment. + returned: always + sample: Succeeded + template_link: + description: + - Link to the template. 
+ returned: always + sample: "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/d01a5c06f4f1bc03a049ca17bbbd6e06d62657b3/101-vm-simple-linux/ + azuredeploy.json" + parameters: + description: + - Dictionary containing deployment parameters. + returned: always + type: complex + outputs: + description: + - Dictionary containing deployment outputs. + returned: always + output_resources: + description: + - List of resources. + returned: always + type: complex + contains: + id: + description: + - Resource id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkI + nterfaces/myNetworkInterface" + name: + description: + - Resource name. + returned: always + type: str + sample: myNetworkInterface + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/networkInterfaces + depends_on: + description: + - List of resource ids. + type: list + returned: always + sample: + - "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGropup/providers/Microsoft.Network/virtualNet + works/myVirtualNetwork" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDeploymentInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + self.results = dict( + changed=False + ) + self.resource_group = None + self.name = None + + super(AzureRMDeploymentInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + is_old_facts = 
self.module._name == 'azure_rm_deployment_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_deployment_facts' module has been renamed to 'azure_rm_deployment_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name: + self.results['deployments'] = self.get() + else: + self.results['deployments'] = self.list() + + return self.results + + def get(self): + response = None + results = [] + try: + response = self.rm_client.deployments.get(self.resource_group, deployment_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Deployment.') + + if response: + results.append(self.format_response(response)) + + return results + + def list(self): + response = None + results = [] + try: + response = self.rm_client.deployments.list_by_resource_group(self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Deployment.') + + if response is not None: + for item in response: + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + output_resources = {} + for dependency in d.get('properties', {}).get('dependencies'): + # go through dependent resources + depends_on = [] + for depends_on_resource in dependency['depends_on']: + depends_on.append(depends_on_resource['id']) + # append if not in list + if not output_resources.get(depends_on_resource['id']): + sub_resource = { + 'id': depends_on_resource['id'], + 'name': depends_on_resource['resource_name'], + 'type': depends_on_resource['resource_type'], + 'depends_on': [] + } + output_resources[depends_on_resource['id']] = sub_resource + resource = { + 'id': dependency['id'], + 'name': dependency['resource_name'], + 'type': dependency['resource_type'], + 'depends_on': depends_on + } + output_resources[dependency['id']] = resource + + # convert dictionary 
to list + output_resources_list = [] + for r in output_resources: + output_resources_list.append(output_resources[r]) + + d = { + 'id': d.get('id'), + 'resource_group': self.resource_group, + 'name': d.get('name'), + 'provisioning_state': d.get('properties', {}).get('provisioning_state'), + 'parameters': d.get('properties', {}).get('parameters'), + 'outputs': d.get('properties', {}).get('outputs'), + 'output_resources': output_resources_list, + 'template_link': d.get('properties', {}).get('template_link', {}).get('uri'), + 'correlation_id': d.get('properties', {}).get('correlation_id') + } + return d + + +def main(): + AzureRMDeploymentInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab.py new file mode 100644 index 000000000..531cbf55a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlab +version_added: "0.1.2" +short_description: Manage Azure DevTest Lab instance +description: + - Create, update and delete instance of Azure DevTest Lab. + +options: + resource_group: + description: + - The name of the resource group. + required: True + name: + description: + - The name of the lab. + required: True + location: + description: + - The location of the resource. + storage_type: + description: + - Type of storage used by the lab. It can be either C(premium) or C(standard). + choices: + - 'standard' + - 'premium' + premium_data_disks: + description: + - Allow creation of premium data disks. 
+ type: bool + state: + description: + - Assert the state of the DevTest Lab. + - Use C(present) to create or update an DevTest Lab and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) DevTest Lab + azure_rm_devtestlab: + resource_group: myResourceGroup + name: mylab + storage_type: standard +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/mylab +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMDevTestLab(AzureRMModuleBase): + """Configuration class for an Azure RM DevTest Lab resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + storage_type=dict( + type='str', + choices=['standard', + 'premium'] + ), + premium_data_disks=dict( + type='bool' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.lab = {} + + self.results = dict(changed=False) + 
self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMDevTestLab, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.lab[key] = kwargs[key] + + if self.lab.get('storage_type'): + self.lab['lab_storage_type'] = _snake_to_camel(self.lab['storage_type'], True) + self.lab.pop('storage_type', None) + if self.lab.get('premium_data_disks') is not None: + self.lab['premium_data_disks'] = 'Enabled' if self.lab['premium_data_disks'] else 'Disabled' + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2018-10-15') + + resource_group = self.get_resource_group(self.resource_group) + if self.lab.get('location') is None: + self.lab['location'] = resource_group.location + + old_response = self.get_devtestlab() + + if not old_response: + self.log("DevTest Lab instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("DevTest Lab instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + if self.lab.get('lab_storage_type') is not None and \ + self.lab.get('lab_storage_type').lower() != old_response.get('lab_storage_type', '').lower(): + self.to_do = Actions.Update + if (self.lab.get('premium_data_disks') is not None and + self.lab.get('premium_data_disks').lower() != old_response.get('premium_data_disks').lower()): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the DevTest Lab instance") + 
self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_update_devtestlab() + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("DevTest Lab instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_devtestlab() + # This currently doesnt' work as there is a bug in SDK / Service + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + else: + self.log("DevTest Lab instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None) + }) + return self.results + + def create_update_devtestlab(self): + ''' + Creates or updates DevTest Lab with the specified configuration. + + :return: deserialized DevTest Lab instance state dictionary + ''' + self.log("Creating / Updating the DevTest Lab instance {0}".format(self.name)) + + try: + response = self.mgmt_client.labs.begin_create_or_update(resource_group_name=self.resource_group, + name=self.name, + lab=self.lab) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the DevTest Lab instance.') + self.fail("Error creating the DevTest Lab instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_devtestlab(self): + ''' + Deletes specified DevTest Lab instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the DevTest Lab instance {0}".format(self.name)) + try: + response = self.mgmt_client.labs.begin_delete(resource_group_name=self.resource_group, + name=self.name) + except Exception as e: + self.log('Error attempting to delete the DevTest Lab instance.') + self.fail("Error deleting the DevTest Lab instance: {0}".format(str(e))) + + return True + + def get_devtestlab(self): + ''' + Gets the properties of the specified DevTest Lab. + + :return: deserialized DevTest Lab instance state dictionary + ''' + self.log("Checking if the DevTest Lab instance {0} is present".format(self.name)) + found = False + try: + response = self.mgmt_client.labs.get(resource_group_name=self.resource_group, + name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("DevTest Lab instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the DevTest Lab instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMDevTestLab() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab_info.py new file mode 100644 index 000000000..63abdd357 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlab_info.py @@ -0,0 +1,272 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlab_info +version_added: "0.1.2" +short_description: Get Azure DevTest Lab facts +description: + - Get facts of Azure DevTest Lab. 
+ +options: + resource_group: + description: + - The name of the resource group. + type: str + name: + description: + - The name of the lab. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) +''' + +EXAMPLES = ''' + - name: List instances of DevTest Lab by resource group + azure_rm_devtestlab_info: + resource_group: testrg + tags: + - key:value + + - name: List instances of DevTest Lab in subscription + azure_rm_devtestlab_info: + + - name: Get instance of DevTest Lab + azure_rm_devtestlab_info: + resource_group: testrg + name: testlab +''' + +RETURN = ''' +labs: + description: + - A list of dictionaries containing facts for Lab. + returned: always + type: complex + contains: + id: + description: + - The identifier of the resource. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab + resource_group: + description: + - The name of the resource. + returned: always + type: str + sample: testrg + name: + description: + - The name of the resource. + returned: always + type: str + sample: testlab + location: + description: + - The location of the resource. + returned: always + type: str + sample: eastus + storage_type: + description: + - Lab storage type. + returned: always + type: str + sample: standard + premium_data_disks: + description: + - Are premium data disks allowed. + returned: always + type: bool + sample: false + provisioning_state: + description: + - Lab provisioning state. + returned: always + type: str + sample: Succeeded + artifacts_storage_account: + description: + - Artifacts storage account ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346 + default_premium_storage_account: + description: + - Default premium storage account ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346 + default_storage_account: + description: + - Default storage account ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346 + premium_data_disk_storage_account: + description: + - Premium data disk storage account ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346 + vault_name: + description: + - Key vault ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myLab6788 + tags: + description: + - The tags of the resource. 
+ returned: always + type: complex + sample: "{ 'MyTag': 'MyValue' }" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDevTestLabInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.name = None + self.tags = None + super(AzureRMDevTestLabInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_devtestlab_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_devtestlab_facts' module has been renamed to 'azure_rm_devtestlab_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.resource_group is not None: + if self.name is not None: + self.results['labs'] = self.get() + else: + self.results['labs'] = self.list_by_resource_group() + else: + self.results['labs'] = self.list_by_subscription() + return self.results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.mgmt_client.labs.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for 
Lab.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def list_by_subscription(self): + response = None + results = [] + try: + response = self.mgmt_client.labs.list_by_subscription() + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Lab.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.labs.get(resource_group_name=self.resource_group, + name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Lab.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'id': d.get('id', None), + 'resource_group': self.resource_group, + 'name': d.get('name', None), + 'location': d.get('location', '').replace(' ', '').lower(), + 'storage_type': d.get('lab_storage_type', '').lower(), + 'premium_data_disks': d.get('premium_data_disks') == 'Enabled', + 'provisioning_state': d.get('provisioning_state'), + 'artifacts_storage_account': d.get('artifacts_storage_account'), + 'default_premium_storage_account': d.get('default_premium_storage_account'), + 'default_storage_account': d.get('default_storage_account'), + 'premium_data_disk_storage_account': d.get('premium_data_disk_storage_account'), + 'vault_name': d.get('vault_name'), + 'tags': d.get('tags', None) + } + return d + + +def main(): + AzureRMDevTestLabInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabarmtemplate_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabarmtemplate_info.py new file mode 100644 index 000000000..42556856c --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabarmtemplate_info.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabarmtemplate_info +version_added: "0.1.2" +short_description: Get Azure DevTest Lab ARM Template facts +description: + - Get facts of Azure DevTest Lab ARM Template. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + lab_name: + description: + - The name of the lab. + required: True + type: str + artifact_source_name: + description: + - The name of the artifact source. + required: True + type: str + name: + description: + - The name of the ARM template. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get information on DevTest Lab ARM Template + azure_rm_devtestlabarmtemplate_info: + resource_group: myResourceGroup + lab_name: myLab + artifact_source_name: public environment repo + name: WebApp +''' + +RETURN = ''' +arm_templates: + description: + - A list of dictionaries containing facts for DevTest Lab ARM Template. + returned: always + type: complex + contains: + id: + description: + - The identifier of the resource. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/art + ifactSources/public environment repo/armTemplates/WebApp" + resource_group: + description: + - Resource group name. 
+ returned: always + sample: myResourceGroup + lab_name: + description: + - DevTest Lab name. + returned: always + sample: myLab + artifact_source_name: + description: + - Artifact source name. + returned: always + sample: public environment repo + name: + description: + - ARM Template name. + returned: always + sample: WebApp + display_name: + description: + - The display name of the ARM template. + returned: always + sample: Web App + description: + description: + - The description of the ARM template. + returned: always + sample: This template creates an Azure Web App without a data store. + publisher: + description: + - The publisher of the ARM template. + returned: always + sample: Microsoft +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDtlArmTemplateInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + artifact_source_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.lab_name = None + self.artifact_source_name = None + self.name = None + super(AzureRMDtlArmTemplateInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_devtestlabarmtemplate_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_devtestlabarmtemplate_facts' module has been renamed to 'azure_rm_devtestlabarmtemplate_info'", + version=(2.9, )) 
+ + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name: + self.results['armtemplates'] = self.get() + else: + self.results['armtemplates'] = self.list() + + return self.results + + def list(self): + response = None + results = [] + try: + response = self.mgmt_client.arm_templates.list(resource_group_name=self.resource_group, + lab_name=self.lab_name, + artifact_source_name=self.artifact_source_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail('Could not get facts for DTL ARM Template.') + + if response is not None: + for item in response: + results.append(self.format_response(item)) + + return results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.arm_templates.get(resource_group_name=self.resource_group, + lab_name=self.lab_name, + artifact_source_name=self.artifact_source_name, + name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.fail('Could not get facts for DTL ARM Template.') + + if response: + results.append(self.format_response(response)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'), + 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'), + 'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'), + 'id': d.get('id', None), + 'name': d.get('name'), + 'display_name': d.get('display_name'), + 'description': d.get('description'), + 'publisher': d.get('publisher') + } + return d + + +def main(): + AzureRMDtlArmTemplateInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifact_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifact_info.py new file mode 100644 index 000000000..7100ad122 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifact_info.py @@ -0,0 +1,246 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabartifact_info +version_added: "0.1.2" +short_description: Get Azure DevTest Lab Artifact facts +description: + - Get facts of Azure DevTest Lab Artifact. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + lab_name: + description: + - The name of the lab. + required: True + type: str + artifact_source_name: + description: + - The name of the artifact source. + required: True + type: str + name: + description: + - The name of the artifact. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of DevTest Lab Artifact + azure_rm_devtestlabartifact_info: + resource_group: myResourceGroup + lab_name: myLab + artifact_source_name: myArtifactSource + name: myArtifact +''' + +RETURN = ''' +artifacts: + description: + - A list of dictionaries containing facts for DevTest Lab Artifact. + returned: always + type: complex + contains: + id: + description: + - The identifier of the artifact. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar + tifactSources/myArtifactSource/artifacts/myArtifact" + resource_group: + description: + - Name of the resource group. 
+ returned: always + type: str + sample: myResourceGroup + lab_name: + description: + - Name of the lab. + returned: always + type: str + sample: myLab + artifact_source_name: + description: + - The name of the artifact source. + returned: always + type: str + sample: myArtifactSource + name: + description: + - The name of the artifact. + returned: always + type: str + sample: myArtifact + description: + description: + - Description of the artifact. + returned: always + type: str + sample: Installs My Software + file_path: + description: + - Artifact's path in the repo. + returned: always + type: str + sample: Artifacts/myArtifact + publisher: + description: + - Publisher name. + returned: always + type: str + sample: MyPublisher + target_os_type: + description: + - Target OS type. + returned: always + type: str + sample: Linux + title: + description: + - Title of the artifact. + returned: always + type: str + sample: My Software + parameters: + description: + - A dictionary containing parameters definition of the artifact. 
+ returned: always + type: complex + sample: {} +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMArtifactInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + artifact_source_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.lab_name = None + self.artifact_source_name = None + self.name = None + super(AzureRMArtifactInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name: + self.results['artifacts'] = self.get() + else: + self.results['artifacts'] = self.list() + + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.artifacts.get(resource_group_name=self.resource_group, + lab_name=self.lab_name, + artifact_source_name=self.artifact_source_name, + name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Artifact.') + + if response: + results.append(self.format_response(response)) + + return results + + def list(self): + response = None + results = [] + try: + response = 
self.mgmt_client.artifacts.list(resource_group_name=self.resource_group, + lab_name=self.lab_name, + artifact_source_name=self.artifact_source_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Artifact.') + + if response is not None: + for item in response: + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'), + 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'), + 'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'), + 'id': d.get('id'), + 'description': d.get('description'), + 'file_path': d.get('file_path'), + 'name': d.get('name'), + 'parameters': d.get('parameters'), + 'publisher': d.get('publisher'), + 'target_os_type': d.get('target_os_type'), + 'title': d.get('title') + } + return d + + +def main(): + AzureRMArtifactInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource.py new file mode 100644 index 000000000..5ca4a5772 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource.py @@ -0,0 +1,362 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabartifactsource +version_added: "0.1.2" +short_description: Manage Azure DevTest Labs Artifacts Source instance +description: + - Create, update and delete instance of Azure DevTest Labs Artifacts Source. 
+ +options: + resource_group: + description: + - The name of the resource group. + required: True + lab_name: + description: + - The name of the lab. + required: True + name: + description: + - The name of the artifact source. + required: True + display_name: + description: + - The artifact source's display name. + uri: + description: + - The artifact source's URI. + source_type: + description: + - The artifact source's type. + choices: + - 'vso' + - 'github' + folder_path: + description: + - The folder containing artifacts. + arm_template_folder_path: + description: + - The folder containing Azure Resource Manager templates. + branch_ref: + description: + - The artifact source's branch reference. + security_token: + description: + - The security token to authenticate to the artifact source. + is_enabled: + description: + - Indicates whether the artifact source is enabled. + type: bool + state: + description: + - Assert the state of the DevTest Labs Artifacts Source. + - Use C(present) to create or update an DevTest Labs Artifacts Source and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) DevTest Labs Artifacts Source + azure_rm_devtestlabartifactsource: + resource_group: myrg + lab_name: mylab + name: myartifacts + uri: https://github.com/myself/myrepo.git + source_type: github + folder_path: / + security_token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/artifactsources/myartifacts +is_enabled: + description: + - Indicates whether the artifact source is enabled. 
+ returned: always + type: bool + sample: true +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMDevTestLabArtifactsSource(AzureRMModuleBase): + """Configuration class for an Azure RM DevTest Labs Artifacts Source resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + display_name=dict( + type='str' + ), + uri=dict( + type='str' + ), + source_type=dict( + type='str', + choices=['vso', + 'github'] + ), + folder_path=dict( + type='str' + ), + arm_template_folder_path=dict( + type='str' + ), + branch_ref=dict( + type='str' + ), + security_token=dict( + type='str', + no_log=True + ), + is_enabled=dict( + type='bool' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.lab_name = None + self.name = None + self.artifact_source = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + required_if = [ + ('state', 'present', [ + 'source_type', 'uri', 'security_token']) + ] + + super(AzureRMDevTestLabArtifactsSource, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main 
module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.artifact_source[key] = kwargs[key] + + if self.artifact_source.get('source_type') == 'github': + self.artifact_source['source_type'] = 'GitHub' + elif self.artifact_source.get('source_type') == 'vso': + self.artifact_source['source_type'] = 'VsoGit' + + if self.artifact_source.get('status') is not None: + self.artifact_source['status'] = 'Enabled' if self.artifact_source.get('status') else 'Disabled' + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version='2018-10-15') + + old_response = self.get_devtestlabartifactssource() + + if not old_response: + self.log("DevTest Labs Artifacts Source instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("DevTest Labs Artifacts Source instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.results['old_response'] = old_response + + if self.artifact_source.get('display_name') is not None: + if self.artifact_source.get('display_name') != old_response.get('display_name'): + self.to_do = Actions.Update + else: + self.artifact_source['display_name'] = old_response.get('display_name') + + if self.artifact_source.get('source_type').lower() != old_response.get('source_type').lower(): + self.to_do = Actions.Update + + if self.artifact_source.get('uri') != old_response.get('uri'): + self.to_do = Actions.Update + + if self.artifact_source.get('branch_ref') is not None: + if self.artifact_source.get('branch_ref') != old_response.get('branch_ref'): + self.to_do = Actions.Update + else: + self.artifact_source['branch_ref'] = old_response.get('branch_ref') + + if 
self.artifact_source.get('status') is not None: + if self.artifact_source.get('status') != old_response.get('status'): + self.to_do = Actions.Update + else: + self.artifact_source['status'] = old_response.get('status') + + if self.artifact_source.get('folder_path') is not None: + if self.artifact_source.get('folder_path') != old_response.get('folder_path'): + self.to_do = Actions.Update + else: + self.artifact_source['folder_path'] = old_response.get('folder_path') + + if self.artifact_source.get('arm_template_folder_path') is not None: + if self.artifact_source.get('arm_template_folder_path') != old_response.get('arm_template_folder_path'): + self.to_do = Actions.Update + else: + self.artifact_source['arm_template_folder_path'] = old_response.get('arm_template_folder_path') + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the DevTest Labs Artifacts Source instance") + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_devtestlabartifactssource() + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("DevTest Labs Artifacts Source instance deleted") + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_devtestlabartifactssource() + else: + self.log("DevTest Labs Artifacts Source instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None), + 'is_enabled': (response.get('status', None).lower() == 'enabled') + }) + return self.results + + def create_update_devtestlabartifactssource(self): + ''' + Creates or updates DevTest Labs Artifacts Source with the specified configuration. 
+ + :return: deserialized DevTest Labs Artifacts Source instance state dictionary + ''' + self.log("Creating / Updating the DevTest Labs Artifacts Source instance {0}".format(self.name)) + + try: + response = self.mgmt_client.artifact_sources.create_or_update(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name, + artifact_source=self.artifact_source) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the DevTest Labs Artifacts Source instance.') + self.fail("Error creating the DevTest Labs Artifacts Source instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_devtestlabartifactssource(self): + ''' + Deletes specified DevTest Labs Artifacts Source instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the DevTest Labs Artifacts Source instance {0}".format(self.name)) + try: + response = self.mgmt_client.artifact_sources.delete(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name) + except Exception as e: + self.log('Error attempting to delete the DevTest Labs Artifacts Source instance.') + self.fail("Error deleting the DevTest Labs Artifacts Source instance: {0}".format(str(e))) + + return True + + def get_devtestlabartifactssource(self): + ''' + Gets the properties of the specified DevTest Labs Artifacts Source. 
+ + :return: deserialized DevTest Labs Artifacts Source instance state dictionary + ''' + self.log("Checking if the DevTest Labs Artifacts Source instance {0} is present".format(self.name)) + found = False + try: + response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("DevTest Labs Artifacts Source instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the DevTest Labs Artifacts Source instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMDevTestLabArtifactsSource() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource_info.py new file mode 100644 index 000000000..96bdde553 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabartifactsource_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabartifactsource_info +version_added: "0.1.2" +short_description: Get Azure DevTest Lab Artifact Source facts +description: + - Get facts of Azure DevTest Lab Artifact Source. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + lab_name: + description: + - The name of DevTest Lab. + required: True + type: str + name: + description: + - The name of DevTest Lab Artifact Source. 
+ type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of DevTest Lab Artifact Source + azure_rm_devtestlabartifactsource_info: + resource_group: myResourceGroup + lab_name: myLab + name: myArtifactSource +''' + +RETURN = ''' +artifactsources: + description: + - A list of dictionaries containing facts for DevTest Lab Artifact Source. + returned: always + type: complex + contains: + id: + description: + - The identifier of the artifact source. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar + tifactSources/myArtifactSource" + resource_group: + description: + - Name of the resource group. + returned: always + type: str + sample: myResourceGroup + lab_name: + description: + - Name of the lab. + returned: always + type: str + sample: myLab + name: + description: + - The name of the artifact source. + returned: always + type: str + sample: myArtifactSource + display_name: + description: + - The artifact source's display name. + returned: always + type: str + sample: Public Artifact Repo + source_type: + description: + - The artifact source's type. + returned: always + type: str + sample: github + is_enabled: + description: + - Is the artifact source enabled. + returned: always + type: str + sample: True + uri: + description: + - URI of the artifact source. + returned: always + type: str + sample: https://github.com/Azure/azure-devtestlab.git + folder_path: + description: + - The folder containing artifacts. + returned: always + type: str + sample: /Artifacts + arm_template_folder_path: + description: + - The folder containing Azure Resource Manager templates. 
+ returned: always + type: str + sample: /Environments + provisioning_state: + description: + - Provisioning state of artifact source. + returned: always + type: str + sample: Succeeded + tags: + description: + - The tags of the resource. + returned: always + type: complex + sample: "{ 'MyTag': 'MyValue' }" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDtlArtifactSourceInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.lab_name = None + self.name = None + self.tags = None + super(AzureRMDtlArtifactSourceInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_devtestlabartifactsource_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_devtestlabartifactsource_facts' module has been renamed to 'azure_rm_devtestlabartifactsource_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name: + self.results['artifactsources'] = self.get() + else: + self.results['artifactsources'] = self.list() + + return self.results 
+ + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.fail('Could not get facts for Artifact Source.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def list(self): + response = None + results = [] + try: + response = self.mgmt_client.artifact_sources.list(resource_group_name=self.resource_group, + lab_name=self.lab_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail('Could not get facts for Artifact Source.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'id': d.get('id'), + 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'), + 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'), + 'name': d.get('name'), + 'display_name': d.get('display_name'), + 'tags': d.get('tags'), + 'source_type': d.get('source_type').lower(), + 'is_enabled': d.get('status') == 'Enabled', + 'uri': d.get('uri'), + 'arm_template_folder_path': d.get('arm_template_folder_path'), + 'folder_path': d.get('folder_path'), + 'provisioning_state': d.get('provisioning_state') + } + return d + + +def main(): + AzureRMDtlArtifactSourceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabcustomimage.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabcustomimage.py new file mode 100644 index 000000000..7b611706b --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabcustomimage.py @@ -0,0 
+1,379 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabcustomimage +version_added: "0.1.2" +short_description: Manage Azure DevTest Lab Custom Image instance +description: + - Create, update and delete instance of Azure DevTest Lab Custom Image. + +options: + resource_group: + description: + - The name of the resource group. + required: True + lab_name: + description: + - The name of the lab. + required: True + name: + description: + - The name of the custom image. + required: True + source_vm: + description: + - Source DevTest Lab virtual machine name. + windows_os_state: + description: + - The state of the Windows OS. + choices: + - 'non_sysprepped' + - 'sysprep_requested' + - 'sysprep_applied' + linux_os_state: + description: + - The state of the Linux OS. + choices: + - 'non_deprovisioned' + - 'deprovision_requested' + - 'deprovision_applied' + description: + description: + - The description of the custom image. + author: + description: + - The author of the custom image. + state: + description: + - Assert the state of the Custom Image. + - Use C(present) to create or update an Custom Image and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create instance of DevTest Lab Image + azure_rm_devtestlabcustomimage: + resource_group: myResourceGroup + lab_name: myLab + name: myImage + source_vm: myDevTestLabVm + linux_os_state: non_deprovisioned +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/images/myImage" +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMDtlCustomImage(AzureRMModuleBase): + """Configuration class for an Azure RM Custom Image resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + source_vm=dict( + type='str' + ), + windows_os_state=dict( + type='str', + choices=['non_sysprepped', + 'sysprep_requested', + 'sysprep_applied'] + ), + linux_os_state=dict( + type='str', + choices=['non_deprovisioned', + 'deprovision_requested', + 'deprovision_applied'] + ), + description=dict( + type='str' + ), + author=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.lab_name = None + self.name = None + self.custom_image = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + required_if = [ + ('state', 'present', [ + 'source_vm']) + ] + + super(AzureRMDtlCustomImage, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + 
required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.custom_image[key] = kwargs[key] + + if self.state == 'present': + windows_os_state = self.custom_image.pop('windows_os_state', False) + linux_os_state = self.custom_image.pop('linux_os_state', False) + source_vm_name = self.custom_image.pop('source_vm') + temp = "/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/virtualmachines/{3}" + self.custom_image['vm'] = {} + self.custom_image['vm']['source_vm_id'] = temp.format(self.subscription_id, self.resource_group, self.lab_name, source_vm_name) + if windows_os_state: + self.custom_image['vm']['windows_os_info'] = {'windows_os_state': _snake_to_camel(windows_os_state, True)} + elif linux_os_state: + self.custom_image['vm']['linux_os_info'] = {'linux_os_state': _snake_to_camel(linux_os_state, True)} + else: + self.fail("Either 'linux_os_state' or 'linux_os_state' must be specified") + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + old_response = self.get_customimage() + + if not old_response: + self.log("Custom Image instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Custom Image instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + if (not default_compare(self.custom_image, old_response, '', self.results)): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Custom Image instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + 
response = self.create_update_customimage() + + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Custom Image instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_customimage() + # This currently doesnt' work as there is a bug in SDK / Service + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + else: + self.log("Custom Image instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None) + }) + return self.results + + def create_update_customimage(self): + ''' + Creates or updates Custom Image with the specified configuration. + + :return: deserialized Custom Image instance state dictionary + ''' + self.log("Creating / Updating the Custom Image instance {0}".format(self.name)) + + try: + response = self.mgmt_client.custom_images.begin_create_or_update(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name, + custom_image=self.custom_image) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Custom Image instance.') + self.fail("Error creating the Custom Image instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_customimage(self): + ''' + Deletes specified Custom Image instance in the specified subscription and resource group. 
def default_compare(new, old, path, result):
    """Recursively compare a requested configuration (new) against the
    current state (old).

    A value of None in ``new`` means "no change requested" and always
    matches.  On the first mismatch a human-readable description is stored
    in ``result['compare']`` and False is returned.

    :param new: desired state (dict / list / scalar)
    :param old: actual state retrieved from Azure
    :param path: slash-separated path of the value being compared; used in
                 mismatch messages and to special-case '/location'
    :param result: dict that receives a 'compare' entry on mismatch
    :return: True when new matches old, False otherwise
    """
    if new is None:
        # Caller did not specify this value - nothing to compare.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        # Guard against empty lists before peeking at old[0] (previously
        # raised IndexError when both lists were empty).
        if len(old) > 0 and isinstance(old[0], dict):
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Azure location strings differ only in spacing/case
            # ("East US" vs "eastus"); normalise BOTH sides.  The original
            # code assigned the normalised `new` to `old` as well, which
            # made every location comparison succeed unconditionally.
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False
+ type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Custom Image + azure_rm_devtestlabcustomimage_info: + resource_group: myResourceGroup + lab_name: myLab + name: myImage + + - name: List instances of Custom Image in the lab + azure_rm_devtestlabcustomimage_info: + resource_group: myResourceGroup + lab_name: myLab + name: myImage +''' + +RETURN = ''' +custom_images: + description: + - A list of dictionaries containing facts for Custom Image. + returned: always + type: complex + contains: + id: + description: + - The identifier of the artifact source. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/cu + stomimages/myImage" + resource_group: + description: + - Name of the resource group. + returned: always + type: str + sample: myResourceGroup + lab_name: + description: + - Name of the lab. + returned: always + type: str + sample: myLab + name: + description: + - The name of the image. + returned: always + type: str + sample: myImage + managed_shapshot_id: + description: + - Managed snapshot id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.compute/snapshots/myImage" + source_vm_id: + description: + - Source VM id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx//resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/v + irtualmachines/myLabVm" + tags: + description: + - The tags of the resource. 
+ returned: always + type: complex + sample: "{ 'MyTag':'MyValue' }" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDtlCustomImageInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.lab_name = None + self.name = None + self.tags = None + super(AzureRMDtlCustomImageInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_devtestlabcustomimage_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_devtestlabcustomimage_facts' module has been renamed to 'azure_rm_devtestlabcustomimage_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name: + self.results['custom_images'] = self.get() + else: + self.results['custom_images'] = self.list() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.custom_images.get(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name) + self.log("Response : {0}".format(response)) + 
except ResourceNotFoundError as e: + self.log('Could not get facts for Custom Image.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def list(self): + response = None + results = [] + try: + response = self.mgmt_client.custom_images.list(resource_group_name=self.resource_group, + lab_name=self.lab_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Custom Image.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'lab_name': self.lab_name, + 'name': d.get('name'), + 'id': d.get('id'), + 'managed_snapshot_id': d.get('managed_snapshot_id'), + 'source_vm_id': d.get('vm', {}).get('source_vm_id'), + 'tags': d.get('tags') + } + return d + + +def main(): + AzureRMDtlCustomImageInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabenvironment.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabenvironment.py new file mode 100644 index 000000000..cd3735ce0 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabenvironment.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabenvironment +version_added: "0.1.2" +short_description: Manage Azure DevTest Lab Environment instance +description: + - Create, update and delete instance of Azure DevTest Lab Environment. 
+ +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + lab_name: + description: + - The name of the lab. + required: True + type: str + user_name: + description: + - The name of the user profile. + required: True + type: str + name: + description: + - The name of the environment. + required: True + type: str + location: + description: + - The location of the resource. + type: str + deployment_template: + description: + - The Azure Resource Manager template's identifier. + type: raw + deployment_parameters: + description: + - The parameters of the Azure Resource Manager template. + type: list + suboptions: + name: + description: + - The name of the template parameter. + type: str + value: + description: + - The value of the template parameter. + type: str + state: + description: + - Assert the state of the Environment. + - Use C((present) to create or update an Environment and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create instance of DevTest Lab Environment from public environment repo + azure_rm_devtestlabenvironment: + resource_group: myResourceGroup + lab_name: myLab + user_name: user + name: myEnvironment + location: eastus + deployment_template: + artifact_source_name: public environment repo + name: WebApp +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/environment + s/myEnvironment" + +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMDtlEnvironment(AzureRMModuleBase): + """Configuration class for an Azure RM Environment resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + user_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + deployment_template=dict( + type='raw' + ), + deployment_parameters=dict( + type='list', + options=dict( + name=dict( + type='str' + ), + value=dict( + type='str' + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.lab_name = None + self.user_name = None + self.name = None + self.dtl_environment = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMDtlEnvironment, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + 
['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.dtl_environment[key] = kwargs[key] + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + deployment_template = self.dtl_environment.pop('deployment_template', None) + if deployment_template: + if isinstance(deployment_template, dict): + if all(key in deployment_template for key in ('artifact_source_name', 'name')): + tmp = '/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/artifactSources/{3}/armTemplates/{4}' + deployment_template = tmp.format(self.subscription_id, + self.resource_group, + self.lab_name, + deployment_template['artifact_source_name'], + deployment_template['name']) + if not isinstance(deployment_template, str): + self.fail("parameter error: expecting deployment_template to contain [artifact_source, name]") + self.dtl_environment['deployment_properties'] = {} + self.dtl_environment['deployment_properties']['arm_template_id'] = deployment_template + self.dtl_environment['deployment_properties']['parameters'] = self.dtl_environment.pop('deployment_parameters', None) + + old_response = self.get_environment() + + if not old_response: + self.log("Environment instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Environment instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + if (not default_compare(self.dtl_environment, old_response, '', self.results)): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Environment instance") + + if self.check_mode: + self.results['changed'] = True + return 
self.results + + response = self.create_update_environment() + + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Environment instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_environment() + # This currently doesn't work as there is a bug in SDK / Service + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + else: + self.log("Environment instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None) + }) + return self.results + + def create_update_environment(self): + ''' + Creates or updates Environment with the specified configuration. + + :return: deserialized Environment instance state dictionary + ''' + self.log("Creating / Updating the Environment instance {0}".format(self.name)) + + try: + if self.to_do == Actions.Create: + response = self.mgmt_client.environments.begin_create_or_update(resource_group_name=self.resource_group, + lab_name=self.lab_name, + user_name=self.user_name, + name=self.name, + dtl_environment=self.dtl_environment) + else: + response = self.mgmt_client.environments.update(resource_group_name=self.resource_group, + lab_name=self.lab_name, + user_name=self.user_name, + name=self.name, + dtl_environment=self.dtl_environment) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Environment instance.') + self.fail("Error creating the Environment instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_environment(self): + ''' + Deletes specified Environment instance in the specified subscription and resource group. 
def default_compare(new, old, path, result):
    """Recursively compare a requested configuration (new) against the
    current state (old).

    A value of None in ``new`` means "no change requested" and always
    matches.  On the first mismatch a human-readable description is stored
    in ``result['compare']`` and False is returned.

    :param new: desired state (dict / list / scalar)
    :param old: actual state retrieved from Azure
    :param path: slash-separated path of the value being compared; used in
                 mismatch messages and to special-case '/location'
    :param result: dict that receives a 'compare' entry on mismatch
    :return: True when new matches old, False otherwise
    """
    if new is None:
        # Caller did not specify this value - nothing to compare.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        # Guard against empty lists before peeking at old[0] (previously
        # raised IndexError when both lists were empty).
        if len(old) > 0 and isinstance(old[0], dict):
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Azure location strings differ only in spacing/case
            # ("East US" vs "eastus"); normalise BOTH sides.  The original
            # code assigned the normalised `new` to `old` as well, which
            # made every location comparison succeed unconditionally.
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Environment + azure_rm_devtestlabenvironment_info: + resource_group: myResourceGroup + lab_name: myLab + user_name: myUser + name: myEnvironment + tags: + - key:value +''' + +RETURN = ''' +environments: + description: + - A list of dictionaries containing facts for Environment. + returned: always + type: complex + contains: + id: + description: + - The identifier of the artifact source. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc + hedules/xxxxxxxx-xxxx-xxxx-xxxxx-xxxxxxxxxxxxx/environments/myEnvironment" + resource_group: + description: + - Name of the resource group. + returned: always + type: str + sample: myResourceGroup + lab_name: + description: + - Name of the lab. + returned: always + type: str + sample: myLab + name: + description: + - The name of the environment. + returned: always + type: str + sample: myEnvironment + deployment_template: + description: + - The identifier of the artifact source. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/mylab/art + ifactSources/public environment repo/armTemplates/WebApp" + resource_group_id: + description: + - Target resource group id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myLab-myEnvironment-982571" + state: + description: + - Deployment state. + returned: always + type: str + sample: Succeeded + tags: + description: + - The tags of the resource. 
class AzureRMDtlEnvironmentInfo(AzureRMModuleBase):
    """Facts module: fetch one or list all DevTest Labs environments of a lab user."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            user_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list',
                elements='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.lab_name = None
        self.user_name = None
        self.name = None
        self.tags = None
        super(AzureRMDtlEnvironmentInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to get() or list() depending on whether I(name) was given."""
        is_old_facts = self.module._name == 'azure_rm_devtestlabenvironment_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_devtestlabenvironment_facts' module has been renamed to 'azure_rm_devtestlabenvironment_info'",
                                  version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    is_track2=True,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.name:
            self.results['environments'] = self.get()
        else:
            self.results['environments'] = self.list()

        return self.results

    def get(self):
        """Return the named environment as a one-element list, or [] when absent or tag-filtered out."""
        response = None
        results = []
        try:
            response = self.mgmt_client.environments.get(resource_group_name=self.resource_group,
                                                         lab_name=self.lab_name,
                                                         user_name=self.user_name,
                                                         name=self.name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            # A missing environment is not an error for a facts module.
            self.log('Could not get facts for Environment.')

        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_response(response))

        return results

    def list(self):
        """Return every environment of the user profile, filtered by I(tags)."""
        response = None
        results = []
        try:
            response = self.mgmt_client.environments.list(resource_group_name=self.resource_group,
                                                          lab_name=self.lab_name,
                                                          user_name=self.user_name)
            self.log("Response : {0}".format(response))
        except Exception as e:
            self.log('Could not get facts for Environment.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_response(item))

        return results

    def format_response(self, item):
        """Flatten an SDK Environment object into the facts dict documented in RETURN."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'lab_name': self.lab_name,
            'name': d.get('name'),
            'user_name': self.user_name,
            'id': d.get('id', None),
            'deployment_template': d.get('deployment_properties', {}).get('arm_template_id'),
            'location': d.get('location'),
            'provisioning_state': d.get('provisioning_state'),
            # RETURN documents this key as 'state'; it was previously missing.
            # 'provisioning_state' is kept as well for backward compatibility.
            'state': d.get('provisioning_state'),
            'resource_group_id': d.get('resource_group_id'),
            'tags': d.get('tags', None)
        }
        return d


def main():
    AzureRMDtlEnvironmentInfo()


if __name__ == '__main__':
    main()
''' +--- +module: azure_rm_devtestlabpolicy +version_added: "0.1.2" +short_description: Manage Azure Policy instance +description: + - Create, update and delete instance of Azure Policy. + +options: + resource_group: + description: + - The name of the resource group. + required: True + lab_name: + description: + - The name of the lab. + required: True + policy_set_name: + description: + - The name of the policy set. + required: True + name: + description: + - The name of the policy. + required: True + description: + description: + - The description of the policy. + fact_name: + description: + - The fact name of the policy (e.g. C(lab_vm_count), C(lab_vm_size)), MaxVmsAllowedPerLab, etc. + choices: + - 'user_owned_lab_vm_count' + - 'user_owned_lab_premium_vm_count' + - 'lab_vm_count' + - 'lab_premium_vm_count' + - 'lab_vm_size' + - 'gallery_image' + - 'user_owned_lab_vm_count_in_subnet' + - 'lab_target_cost' + threshold: + description: + - The threshold of the policy (it could be either a maximum value or a list of allowed values). + type: raw + state: + description: + - Assert the state of the Policy. + - Use C(present) to create or update an Policy and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create DevTest Lab Policy + azure_rm_devtestlabpolicy: + resource_group: myResourceGroup + lab_name: myLab + policy_set_name: myPolicySet + name: myPolicy + fact_name: user_owned_lab_vm_count + threshold: 5 +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/policySets/ + myPolicySet/policies/myPolicy" + +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMDtlPolicy(AzureRMModuleBase): + """Configuration class for an Azure RM Policy resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + policy_set_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + description=dict( + type='str' + ), + fact_name=dict( + type='str', + choices=['user_owned_lab_vm_count', + 'user_owned_lab_premium_vm_count', + 'lab_vm_count', + 'lab_premium_vm_count', + 'lab_vm_size', + 'gallery_image', + 'user_owned_lab_vm_count_in_subnet', + 'lab_target_cost'] + ), + threshold=dict( + type='raw' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.lab_name = None + self.policy_set_name = None + self.name = None + self.policy = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + required_if = [ + ('state', 'present', ['threshold', 'fact_name']) + ] + + super(AzureRMDtlPolicy, 
self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.policy[key] = kwargs[key] + + if self.state == 'present': + self.policy['status'] = 'Enabled' + dict_camelize(self.policy, ['fact_name'], True) + if isinstance(self.policy['threshold'], list): + self.policy['evaluator_type'] = 'AllowedValuesPolicy' + else: + self.policy['evaluator_type'] = 'MaxValuePolicy' + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_policy() + + if not old_response: + self.log("Policy instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Policy instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + if (not default_compare(self.policy, old_response, '', self.results)): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Policy instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_policy() + + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Policy instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_policy() + # This currently doesnt' work as there is a bug in SDK / Service + if isinstance(response, LROPoller) or isinstance(response, 
AzureOperationPoller): + response = self.get_poller_result(response) + else: + self.log("Policy instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None), + 'status': response.get('status', None) + }) + return self.results + + def create_update_policy(self): + ''' + Creates or updates Policy with the specified configuration. + + :return: deserialized Policy instance state dictionary + ''' + self.log("Creating / Updating the Policy instance {0}".format(self.name)) + + try: + response = self.mgmt_client.policies.create_or_update(resource_group_name=self.resource_group, + lab_name=self.lab_name, + policy_set_name=self.policy_set_name, + name=self.name, + policy=self.policy) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Policy instance.') + self.fail("Error creating the Policy instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_policy(self): + ''' + Deletes specified Policy instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Policy instance {0}".format(self.name)) + try: + response = self.mgmt_client.policies.delete(resource_group_name=self.resource_group, + lab_name=self.lab_name, + policy_set_name=self.policy_set_name, + name=self.name) + except Exception as e: + self.log('Error attempting to delete the Policy instance.') + self.fail("Error deleting the Policy instance: {0}".format(str(e))) + + return True + + def get_policy(self): + ''' + Gets the properties of the specified Policy. 
def default_compare(new, old, path, result):
    '''
    Recursively check whether the existing resource state *old* already
    satisfies the requested state *new*.

    Only keys present in *new* are compared and lists are compared
    order-insensitively.  On the first mismatch a human-readable reason is
    written to result['compare'].

    :return: True when no change is needed, False otherwise
    '''
    if new is None:
        # Parameter not supplied by the user -- anything matches.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        if len(old) == 0:
            # Both lists are empty (lengths are equal here) -- nothing to
            # compare.  Without this guard old[0] below raised IndexError.
            return True
        if isinstance(old[0], dict):
            # Sort both sides by a stable key so ordering differences don't
            # register as changes.
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Azure reports locations inconsistently ("East US" vs "eastus"),
            # so normalise BOTH sides before comparing.  The original code
            # normalised `new` twice (old = new.replace(...)), which made every
            # pair of locations compare equal and hid real location changes.
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if str(new) == str(old):
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False


def dict_camelize(d, path, camelize_first):
    """In-place: convert the snake_case string found at *path* inside *d* to camelCase.

    *path* is a list of nested keys; lists encountered along the way are
    traversed element-by-element.  Missing keys are silently skipped.
    """
    if isinstance(d, list):
        for i in range(len(d)):
            dict_camelize(d[i], path, camelize_first)
    elif isinstance(d, dict):
        if len(path) == 1:
            old_value = d.get(path[0], None)
            if old_value is not None:
                d[path[0]] = _snake_to_camel(old_value, camelize_first)
        else:
            sd = d.get(path[0], None)
            if sd is not None:
                dict_camelize(sd, path[1:], camelize_first)


def dict_map(d, path, map):
    """In-place: replace the value found at *path* inside *d* via the *map* lookup.

    Values absent from *map* are left unchanged; missing keys are skipped.
    """
    if isinstance(d, list):
        for i in range(len(d)):
            dict_map(d[i], path, map)
    elif isinstance(d, dict):
        if len(path) == 1:
            old_value = d.get(path[0], None)
            if old_value is not None:
                d[path[0]] = map.get(old_value, old_value)
        else:
            sd = d.get(path[0], None)
            if sd is not None:
                dict_map(sd, path[1:], map)


def main():
    """Main execution"""
    AzureRMDtlPolicy()


if __name__ == '__main__':
    main()
Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Policy + azure_rm_devtestlabpolicy_info: + resource_group: myResourceGroup + lab_name: myLab + policy_set_name: myPolicySet + name: myPolicy + tags: + - key:value +''' + +RETURN = ''' +policies: + description: + - A list of dictionaries containing facts for Policy. + returned: always + type: complex + contains: + id: + description: + - The identifier of the artifact source. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/po + licysets/myPolicySet/policies/myPolicy" + resource_group: + description: + - Name of the resource group. + returned: always + type: str + sample: myResourceGroup + lab_name: + description: + - Name of the lab. + returned: always + type: str + sample: myLab + name: + description: + - The name of the artifact source. + returned: always + type: str + sample: myArtifactSource + fact_name: + description: + - The name of the policy fact. + returned: always + type: str + sample: UserOwnedLabVmCount + evaluator_type: + description: + - Evaluator type for policy fact. + returned: always + type: str + sample: MaxValuePolicy + threshold: + description: + - Fact's threshold. + returned: always + type: str + sample: 5 + tags: + description: + - The tags of the resource. 
+ returned: always + type: complex + sample: "{ 'MyTag': 'MyValue' }" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDtlPolicyInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + policy_set_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.lab_name = None + self.policy_set_name = None + self.name = None + self.tags = None + super(AzureRMDtlPolicyInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_devtestlabpolicy_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_devtestlabpolicy_facts' module has been renamed to 'azure_rm_devtestlabpolicy_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name: + self.results['policies'] = self.get() + else: + self.results['policies'] = self.list() + + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.policies.get(resource_group_name=self.resource_group, + lab_name=self.lab_name, + 
policy_set_name=self.policy_set_name, + name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Policy.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def list(self): + response = None + results = [] + try: + response = self.mgmt_client.policies.list(resource_group_name=self.resource_group, + lab_name=self.lab_name, + policy_set_name=self.policy_set_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Policy.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'policy_set_name': self.policy_set_name, + 'name': d.get('name'), + 'id': d.get('id'), + 'tags': d.get('tags'), + 'status': d.get('status'), + 'threshold': d.get('threshold'), + 'fact_name': d.get('fact_name'), + 'evaluator_type': d.get('evaluator_type') + } + return d + + +def main(): + AzureRMDtlPolicyInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule.py new file mode 100644 index 000000000..c672cb274 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabschedule +version_added: "0.1.2" +short_description: Manage 
Azure DevTest Lab Schedule instance +description: + - Create, update and delete instance of Azure DecTest Lab Schedule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + lab_name: + description: + - The name of the lab. + required: True + name: + description: + - The name of the schedule. + required: True + choices: + - lab_vms_startup + - lab_vms_shutdown + time: + description: + - The time of day the schedule will occur. + time_zone_id: + description: + - The time zone ID. + state: + description: + - Assert the state of the Schedule. + - Use C(present) to create or update an Schedule and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) DevTest Lab Schedule + azure_rm_devtestlabschedule: + resource_group: myResourceGroup + lab_name: myLab + name: lab_vms_shutdown + time: "1030" + time_zone_id: "UTC+12" +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/schedules/l + abVmsShutdown" +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.devtestlabs import DevTestLabsClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMSchedule(AzureRMModuleBase): + """Configuration class for an Azure RM Schedule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + lab_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True, + choices=['lab_vms_startup', 'lab_vms_shutdown'] + ), + time=dict( + type='str' + ), + time_zone_id=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.lab_name = None + self.name = None + self.schedule = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + required_if = [ + ('state', 'present', ['time', 'time_zone_id']) + ] + + super(AzureRMSchedule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not 
None: + self.schedule[key] = kwargs[key] + + self.schedule['status'] = "Enabled" + + if self.name == 'lab_vms_startup': + self.name = 'LabVmsStartup' + self.schedule['task_type'] = 'LabVmsStartupTask' + elif self.name == 'lab_vms_shutdown': + self.name = 'LabVmsShutdown' + self.schedule['task_type'] = 'LabVmsShutdownTask' + + if self.state == 'present': + self.schedule['daily_recurrence'] = {'time': self.schedule.pop('time')} + self.schedule['time_zone_id'] = self.schedule['time_zone_id'].upper() + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_schedule() + + if not old_response: + self.log("Schedule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Schedule instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + if (not default_compare(self.schedule, old_response, '', self.results)): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Schedule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_schedule() + + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Schedule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_schedule() + # This currently doesn't work as there is a bug in SDK / Service + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + else: + self.log("Schedule instance unchanged") + self.results['changed'] = False + 
response = old_response + + if self.state == 'present': + self.results.update({ + 'id': response.get('id', None) + }) + return self.results + + def create_update_schedule(self): + ''' + Creates or updates Schedule with the specified configuration. + + :return: deserialized Schedule instance state dictionary + ''' + self.log("Creating / Updating the Schedule instance {0}".format(self.name)) + + try: + response = self.mgmt_client.schedules.create_or_update(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name, + schedule=self.schedule) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Schedule instance.') + self.fail("Error creating the Schedule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_schedule(self): + ''' + Deletes specified Schedule instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Schedule instance {0}".format(self.name)) + try: + response = self.mgmt_client.schedules.delete(resource_group_name=self.resource_group, + lab_name=self.lab_name, + name=self.name) + except Exception as e: + self.log('Error attempting to delete the Schedule instance.') + self.fail("Error deleting the Schedule instance: {0}".format(str(e))) + + return True + + def get_schedule(self): + ''' + Gets the properties of the specified Schedule. 
def default_compare(new, old, path, result):
    '''
    Recursively check whether the existing schedule state *old* already
    satisfies the requested state *new*.

    Only keys present in *new* are compared and lists are compared
    order-insensitively.  On the first mismatch a human-readable reason is
    written to result['compare'].

    :return: True when no change is needed, False otherwise
    '''
    if new is None:
        # Parameter not supplied by the user -- anything matches.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        if len(old) == 0:
            # Both lists are empty (lengths are equal here) -- nothing to
            # compare.  Without this guard old[0] below raised IndexError.
            return True
        if isinstance(old[0], dict):
            # Sort both sides by a stable key so ordering differences don't
            # register as changes.
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Azure reports locations inconsistently ("East US" vs "eastus"),
            # so normalise BOTH sides before comparing.  The original code
            # normalised `new` twice (old = new.replace(...)), which made every
            # pair of locations compare equal and hid real location changes.
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False


def main():
    """Main execution"""
    AzureRMSchedule()


if __name__ == '__main__':
    main()
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule_info.py new file mode 100644 index 000000000..dae349d2d --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabschedule_info.py @@ -0,0 +1,222 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabschedule_info +version_added: "0.1.2" +short_description: Get Azure Schedule facts +description: + - Get facts of Azure Schedule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + lab_name: + description: + - The name of the lab. + required: True + type: str + name: + description: + - The name of the schedule. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of Schedule + azure_rm_devtestlabschedule_info: + resource_group: myResourceGroup + lab_name: myLab + name: mySchedule + tags: + - key:value +''' + +RETURN = ''' +schedules: + description: + - A list of dictionaries containing facts for Schedule. + returned: always + type: complex + contains: + id: + description: + - The identifier of the artifact source. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc + hedules/labvmsshutdown" + resource_group: + description: + - Name of the resource group. 
class AzureRMDtlScheduleInfo(AzureRMModuleBase):
    """Facts module: retrieve one or all schedules of a DevTest Lab."""

    def __init__(self):
        # resource_group and lab_name are mandatory; name selects a single
        # schedule, tags filter the result set.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            lab_name=dict(type='str', required=True),
            name=dict(type='str'),
            tags=dict(type='list', elements='str')
        )
        # Result skeleton; facts modules never report a change.
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.resource_group = None
        self.lab_name = None
        self.name = None
        self.tags = None
        super(AzureRMDtlScheduleInfo, self).__init__(self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=False,
                                                     facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to a single-item get or a full listing."""
        if self.module._name == 'azure_rm_devtestlabschedule_facts':
            # Keep the legacy *_facts alias working, but warn about it.
            self.module.deprecate("The 'azure_rm_devtestlabschedule_facts' module has been renamed to 'azure_rm_devtestlabschedule_info'", version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    is_track2=True,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        self.results['schedules'] = self.get() if self.name else self.list()
        return self.results

    def get(self):
        """Fetch a single schedule by name; return [] when it does not exist."""
        schedule = None
        try:
            schedule = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
                                                      lab_name=self.lab_name,
                                                      name=_snake_to_camel(self.name))
            self.log("Response : {0}".format(schedule))
        except ResourceNotFoundError:
            self.log('Could not get facts for Schedule.')

        if schedule and self.has_tags(schedule.tags, self.tags):
            return [self.format_response(schedule)]
        return []

    def list(self):
        """List every schedule in the lab that matches the tag filter."""
        paged = None
        try:
            paged = self.mgmt_client.schedules.list(resource_group_name=self.resource_group,
                                                    lab_name=self.lab_name)
            self.log("Response : {0}".format(paged))
        except Exception:
            self.log('Could not get facts for Schedule.')

        if paged is None:
            return []
        return [self.format_response(item)
                for item in paged
                if self.has_tags(item.tags, self.tags)]

    def format_response(self, item):
        """Flatten an SDK schedule object into the documented fact shape."""
        raw = item.as_dict()
        return {
            'resource_group': self.resource_group,
            'lab_name': self.lab_name,
            'name': _camel_to_snake(raw.get('name')),
            'id': raw.get('id', None),
            'tags': raw.get('tags', None),
            'time': raw.get('daily_recurrence', {}).get('time'),
            'time_zone_id': raw.get('time_zone_id')
        }
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualmachine.py @@ -0,0 +1,540 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabvirtualmachine +version_added: "0.1.2" +short_description: Manage Azure DevTest Lab Virtual Machine instance +description: + - Create, update and delete instance of Azure DevTest Lab Virtual Machine. + +options: + resource_group: + description: + - The name of the resource group. + required: True + lab_name: + description: + - The name of the lab. + required: True + name: + description: + - The name of the virtual machine. + required: True + notes: + description: + - The notes of the virtual machine. + os_type: + description: + - Base type of operating system. + choices: + - windows + - linux + vm_size: + description: + - A valid Azure VM size value. For example, C(Standard_D4). + - The list of choices varies depending on the subscription and location. Check your subscription for available choices. + - Available values can be found on this website, link U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-general). + - Required when I(state=present). + user_name: + description: + - The user name of the virtual machine. + password: + description: + - The password of the virtual machine administrator. + ssh_key: + description: + - The SSH key of the virtual machine administrator. + lab_subnet: + description: + - An existing subnet within lab's virtual network. + - It can be the subnet's resource id. + - It can be a dict which contains C(virtual_network_name) and C(name). + disallow_public_ip_address: + description: + - Indicates whether the virtual machine is to be created without a public IP address. 
+ artifacts: + description: + - The artifacts to be installed on the virtual machine. + type: list + suboptions: + source_name: + description: + - The artifact's source name. + source_path: + description: + - The artifact's path in the source repository. + parameters: + description: + - The parameters of the artifact. + type: list + suboptions: + name: + description: + - The name of the artifact parameter. + value: + description: + - The value of the artifact parameter. + image: + description: + - The Microsoft Azure Marketplace image reference of the virtual machine. + suboptions: + offer: + description: + - The offer of the gallery image. + publisher: + description: + - The publisher of the gallery image. + sku: + description: + - The SKU of the gallery image. + os_type: + description: + - The OS type of the gallery image. + version: + description: + - The version of the gallery image. + expiration_date: + description: + - The expiration date for VM. + allow_claim: + description: + - Indicates whether another user can take ownership of the virtual machine. + storage_type: + description: + - Storage type to use for virtual machine. + choices: + - standard + - premium + state: + description: + - Assert the state of the Virtual Machine. + - Use C(present) to create or update an Virtual Machine and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) Virtual Machine + azure_rm_devtestlabvirtualmachine: + resource_group: myrg + lab_name: mylab + name: myvm + notes: Virtual machine notes.... + os_type: linux + vm_size: Standard_A2_v2 + user_name: vmadmin + password: ZSuppas$$21! 
class AzureRMVirtualMachine(AzureRMModuleBase):
    """Configuration class for an Azure RM DevTest Lab Virtual Machine resource.

    Creates, updates and deletes a virtual machine inside a DevTest Lab.
    Most VM properties are immutable after creation; on update the module
    warns and keeps the existing value instead of failing. Only 'notes'
    is updated in place.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            notes=dict(
                type='str'
            ),
            os_type=dict(
                type='str',
                choices=['linux', 'windows']
            ),
            vm_size=dict(
                type='str'
            ),
            user_name=dict(
                type='str'
            ),
            password=dict(
                type='str',
                no_log=True
            ),
            ssh_key=dict(
                type='str',
                no_log=True
            ),
            # Either a full subnet resource id (str) or a dict with
            # 'virtual_network_name' and 'name' keys, hence type 'raw'.
            lab_subnet=dict(
                type='raw'
            ),
            disallow_public_ip_address=dict(
                type='str'
            ),
            # NOTE(review): the documented suboptions are
            # source_name/source_path (which is what exec_module consumes);
            # the spec below lists artifact_id instead. Kept as-is for
            # backward compatibility -- confirm against module docs.
            artifacts=dict(
                type='list',
                options=dict(
                    artifact_id=dict(
                        type='str'
                    ),
                    parameters=dict(
                        type='list',
                        options=dict(
                            name=dict(
                                type='str'
                            ),
                            value=dict(
                                type='str'
                            )
                        )
                    )
                )
            ),
            image=dict(
                type='dict',
                options=dict(
                    offer=dict(
                        type='str'
                    ),
                    publisher=dict(
                        type='str'
                    ),
                    sku=dict(
                        type='str'
                    ),
                    os_type=dict(
                        type='str'
                    ),
                    version=dict(
                        type='str'
                    )
                )
            ),
            expiration_date=dict(
                type='str'
            ),
            allow_claim=dict(
                type='str'
            ),
            storage_type=dict(
                type='str',
                choices=['standard', 'premium']
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # These options are only meaningful (and only validated) when the VM
        # is being created or updated.
        required_if = [
            ('state', 'present', [
                'image', 'lab_subnet', 'vm_size', 'os_type'])
        ]

        self.resource_group = None
        self.lab_name = None
        self.name = None
        # Request body accumulated from the remaining module options.
        self.lab_virtual_machine = dict()

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=True,
                                                    required_if=required_if)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Known attributes become instance state; every other supplied option
        # goes into the request body dictionary.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.lab_virtual_machine[key] = kwargs[key]

        self.lab_virtual_machine['gallery_image_reference'] = self.lab_virtual_machine.pop('image', None)

        # Expand artifact source name/path pairs into full artifact ids.
        if self.lab_virtual_machine.get('artifacts') is not None:
            for artifact in self.lab_virtual_machine.get('artifacts'):
                source_name = artifact.pop('source_name')
                source_path = artifact.pop('source_path')
                template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DevTestLab/labs/{2}/artifactsources/{3}{4}"
                artifact['artifact_id'] = template.format(self.subscription_id, self.resource_group, self.lab_name, source_name, source_path)

        # Fix: pop/transform with defaults so that 'state: absent' without
        # the present-only options (vm_size, os_type, lab_subnet) does not
        # raise KeyError -- required_if only enforces them for 'present'.
        self.lab_virtual_machine['size'] = self.lab_virtual_machine.pop('vm_size', None)
        if self.lab_virtual_machine.get('os_type') is not None:
            self.lab_virtual_machine['os_type'] = _snake_to_camel(self.lab_virtual_machine['os_type'], True)

        if self.lab_virtual_machine.get('storage_type'):
            self.lab_virtual_machine['storage_type'] = _snake_to_camel(self.lab_virtual_machine['storage_type'], True)

        lab_subnet = self.lab_virtual_machine.pop('lab_subnet', None)

        if isinstance(lab_subnet, str):
            # Full subnet resource id: '<vnet id>/subnets/<subnet name>'
            vn_and_subnet = lab_subnet.split('/subnets/')
            if (len(vn_and_subnet) == 2):
                self.lab_virtual_machine['lab_virtual_network_id'] = vn_and_subnet[0]
                self.lab_virtual_machine['lab_subnet_name'] = vn_and_subnet[1]
            else:
                self.fail("Invalid 'lab_subnet' resource id format")
        elif lab_subnet is not None:
            # Dict form: construct the lab virtual network id from its name.
            template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DevTestLab/labs/{2}/virtualnetworks/{3}"
            self.lab_virtual_machine['lab_virtual_network_id'] = template.format(self.subscription_id,
                                                                                 self.resource_group,
                                                                                 self.lab_name,
                                                                                 lab_subnet.get('virtual_network_name'))
            self.lab_virtual_machine['lab_subnet_name'] = lab_subnet.get('name')

        response = None

        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    is_track2=True,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        old_response = self.get_virtualmachine()

        if not old_response:
            self.log("Virtual Machine instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
                # The service requires a location and it must match the
                # lab's, so take it from the lab itself.
                lab = self.get_devtestlab()
                self.lab_virtual_machine['location'] = lab['location']
        else:
            self.log("Virtual Machine instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.lab_virtual_machine['location'] = old_response['location']

                # Immutable properties: warn and keep the existing value.
                if old_response['size'].lower() != self.lab_virtual_machine.get('size').lower():
                    self.lab_virtual_machine['size'] = old_response['size']
                    self.module.warn("Property 'size' cannot be changed")

                if self.lab_virtual_machine.get('storage_type') is not None and \
                        old_response['storage_type'].lower() != self.lab_virtual_machine.get('storage_type').lower():
                    self.lab_virtual_machine['storage_type'] = old_response['storage_type']
                    self.module.warn("Property 'storage_type' cannot be changed")

                if old_response.get('gallery_image_reference', {}) != self.lab_virtual_machine.get('gallery_image_reference', {}):
                    self.lab_virtual_machine['gallery_image_reference'] = old_response['gallery_image_reference']
                    self.module.warn("Property 'image' cannot be changed")

                # Artifacts can only be specified at creation time, and the
                # API only reports a total count, so compare the counts.
                if len(self.lab_virtual_machine.get('artifacts', [])) != old_response['artifact_deployment_status']['total_artifacts']:
                    self.module.warn("Property 'artifacts' cannot be changed")

                if self.lab_virtual_machine.get('disallow_public_ip_address') is not None:
                    if old_response['disallow_public_ip_address'] != self.lab_virtual_machine.get('disallow_public_ip_address'):
                        self.module.warn("Property 'disallow_public_ip_address' cannot be changed")
                    self.lab_virtual_machine['disallow_public_ip_address'] = old_response['disallow_public_ip_address']

                if self.lab_virtual_machine.get('allow_claim') is not None:
                    if old_response['allow_claim'] != self.lab_virtual_machine.get('allow_claim'):
                        self.module.warn("Property 'allow_claim' cannot be changed")
                    self.lab_virtual_machine['allow_claim'] = old_response['allow_claim']

                # 'notes' is the only property updated in place.
                if self.lab_virtual_machine.get('notes') is not None:
                    if old_response['notes'] != self.lab_virtual_machine.get('notes'):
                        self.to_do = Actions.Update
                else:
                    self.lab_virtual_machine['notes'] = old_response['notes']

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Virtual Machine instance")

            self.results['changed'] = True
            if self.check_mode:
                return self.results

            response = self.create_update_virtualmachine()

            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Virtual Machine instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_virtualmachine()
        else:
            self.log("Virtual Machine instance unchanged")
            self.results['changed'] = False
            response = old_response

        if self.state == 'present':
            self.results.update({
                'id': response.get('id', None),
                'compute_id': response.get('compute_id', None),
                'fqdn': response.get('fqdn', None)
            })
        return self.results

    def create_update_virtualmachine(self):
        '''
        Creates or updates Virtual Machine with the specified configuration.

        :return: deserialized Virtual Machine instance state dictionary
        '''
        self.log("Creating / Updating the Virtual Machine instance {0}".format(self.name))

        try:
            response = self.mgmt_client.virtual_machines.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                lab_name=self.lab_name,
                                                                                name=self.name,
                                                                                lab_virtual_machine=self.lab_virtual_machine)
            # Long-running operation: block until completion.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)

        except Exception as exc:
            self.log('Error attempting to create the Virtual Machine instance.')
            self.fail("Error creating the Virtual Machine instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_virtualmachine(self):
        '''
        Deletes specified Virtual Machine instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Virtual Machine instance {0}".format(self.name))
        try:
            response = self.mgmt_client.virtual_machines.begin_delete(resource_group_name=self.resource_group,
                                                                      lab_name=self.lab_name,
                                                                      name=self.name)
        except Exception as e:
            self.log('Error attempting to delete the Virtual Machine instance.')
            self.fail("Error deleting the Virtual Machine instance: {0}".format(str(e)))

        # Long-running operation: block until completion.
        if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
            response = self.get_poller_result(response)

        return True

    def get_virtualmachine(self):
        '''
        Gets the properties of the specified Virtual Machine.

        :return: deserialized Virtual Machine instance state dictionary,
                 or False when the virtual machine does not exist
        '''
        self.log("Checking if the Virtual Machine instance {0} is present".format(self.name))
        try:
            response = self.mgmt_client.virtual_machines.get(resource_group_name=self.resource_group,
                                                             lab_name=self.lab_name,
                                                             name=self.name)
            self.log("Response : {0}".format(response))
            self.log("Virtual Machine instance : {0} found".format(response.name))
            return response.as_dict()
        except ResourceNotFoundError:
            self.log('Did not find the Virtual Machine instance.')
        return False

    def get_devtestlab(self):
        '''
        Gets the properties of the specified DevTest Lab.

        :return: deserialized DevTest Lab instance state dictionary
                 (fails the module when the lab does not exist)
        '''
        self.log("Checking if the DevTest Lab instance {0} is present".format(self.lab_name))
        try:
            response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
                                                 name=self.lab_name)
            self.log("Response : {0}".format(response))
            self.log("DevTest Lab instance : {0} found".format(response.name))
            return response.as_dict()
        except ResourceNotFoundError:
            # The lab is a hard prerequisite for creating a VM.
            self.fail('Did not find the DevTest Lab instance.')
        return False
''' +--- +module: azure_rm_devtestlabvirtualmachine_info +version_added: "0.1.2" +short_description: Get Azure DevTest Lab Virtual Machine facts +description: + - Get facts of Azure DevTest Lab Virtual Machine. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + lab_name: + description: + - The name of the lab. + required: True + type: str + name: + description: + - The name of the virtual machine. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of DTL Virtual Machine + azure_rm_devtestlabvirtualmachine_info: + resource_group: myResourceGroup + lab_name: myLab + name: myVm + tags: + - key:value +''' + +RETURN = ''' +virtualmachines: + description: + - A list of dictionaries containing facts for DevTest Lab Virtual Machine. + returned: always + type: complex + contains: + id: + description: + - The identifier of the virtual machine. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/virt + ualmachines/myVm" + resource_group: + description: + - Name of the resource group. + returned: always + type: str + sample: myResourceGroup + lab_name: + description: + - Name of the lab. + returned: always + type: str + sample: myLab + name: + description: + - Name of the virtual machine. + returned: always + type: str + sample: myVm + notes: + description: + - Notes of the virtual machine. + returned: always + type: str + sample: My VM notes + disallow_public_ip_address: + description: + - Whether public IP should be not allowed. + returned: always + type: bool + sample: false + expiration_date: + description: + - Virtual machine expiration date. 
+ returned: always + type: str + sample: "2029-02-22T01:49:12.117974Z" + image: + description: + - Gallery image reference. + returned: always + type: complex + contains: + offer: + description: + - The offer of the gallery image. + returned: when created from gallery image + type: str + sample: UbuntuServer + os_type: + description: + - Operating system type. + returned: when created from gallery image + type: str + sample: Linux + sku: + description: + - The SKU of the gallery image. + returned: when created from gallery image + type: str + sample: 16.04-LTS + publisher: + description: + - The publisher of the gallery image. + returned: when created from gallery image + type: str + sample: Canonical + version: + description: + - The version of the gallery image. + returned: when created from gallery image + type: str + sample: latest + os_type: + description: + - Operating system type. + returned: always + type: str + sample: linux + vm_size: + description: + - Virtual machine size. + returned: always + type: str + sample: Standard_A2_v2 + user_name: + description: + - Admin user name. + returned: always + type: str + sample: dtl_admin + storage_type: + description: + - Storage type to use for virtual machine. + returned: always + type: str + sample: standard + compute_vm_id: + description: + - Resource id of compute virtual machine. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLab-myVm-097933/providers/Microsoft.Compute/virtualMachines/myVm + compute_vm_resource_group: + description: + - Resource group where compute virtual machine is created. + returned: always + type: str + sample: myLab-myVm-097933 + compute_vm_name: + description: + - Name of compute virtual machine. + returned: always + type: str + sample: myVm + fqdn: + description: + - Fully qualified domain name. 
class AzureRMDtlVirtualMachineInfo(AzureRMModuleBase):
    """Facts module: retrieve one or all virtual machines of a DevTest Lab."""

    def __init__(self):
        # resource_group and lab_name are mandatory; name selects a single
        # VM, tags filter the results.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list',
                elements='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.lab_name = None
        self.name = None
        self.tags = None
        super(AzureRMDtlVirtualMachineInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to a single-item get or a full listing."""
        is_old_facts = self.module._name == 'azure_rm_devtestlabvirtualmachine_facts'
        if is_old_facts:
            # Keep the legacy *_facts alias working, but warn about it.
            self.module.deprecate("The 'azure_rm_devtestlabvirtualmachine_facts' module has been renamed to 'azure_rm_devtestlabvirtualmachine_info'",
                                  version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    is_track2=True,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.name:
            self.results['virtualmachines'] = self.get()
        else:
            self.results['virtualmachines'] = self.list()

        return self.results

    def get(self):
        """Fetch a single VM by name.

        NOTE(review): a missing VM fails the module here, unlike the sibling
        schedule info module which logs and returns [] -- confirm this is the
        intended contract before changing it.
        """
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_machines.get(resource_group_name=self.resource_group,
                                                             lab_name=self.lab_name,
                                                             name=self.name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            self.fail('Could not get facts for Virtual Machine.')

        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_response(response))

        return results

    def list(self):
        """List every VM in the lab that matches the tag filter."""
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_machines.list(resource_group_name=self.resource_group,
                                                              lab_name=self.lab_name)
            self.log("Response : {0}".format(response))
        except Exception as e:
            self.fail('Could not get facts for Virtual Machine.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_response(item))
        return results

    def format_response(self, item):
        """Flatten an SDK VM object into the documented fact shape.

        Fix: guard the optional fields -- 'os_type', 'storage_type' and
        'compute_id' can be absent (e.g. claimable or not-yet-provisioned
        VMs), and the original called .lower() / parse_resource_to_dict()
        on None, crashing the whole facts run.
        """
        d = item.as_dict()
        lab_parts = self.parse_resource_to_dict(d.get('id'))
        compute_id = d.get('compute_id')
        compute_parts = self.parse_resource_to_dict(compute_id) if compute_id else {}
        os_type = d.get('os_type')
        storage_type = d.get('storage_type')
        d = {
            'id': d.get('id', None),
            'resource_group': lab_parts.get('resource_group'),
            'lab_name': lab_parts.get('name'),
            'name': d.get('name'),
            'notes': d.get('notes'),
            'disallow_public_ip_address': d.get('disallow_public_ip_address'),
            'expiration_date': d.get('expiration_date'),
            'image': d.get('gallery_image_reference'),
            'os_type': os_type.lower() if os_type else None,
            'vm_size': d.get('size'),
            'user_name': d.get('user_name'),
            'storage_type': storage_type.lower() if storage_type else None,
            'compute_vm_id': compute_id,
            'compute_vm_resource_group': compute_parts.get('resource_group'),
            'compute_vm_name': compute_parts.get('name'),
            'fqdn': d.get('fqdn'),
            'provisioning_state': d.get('provisioning_state'),
            'tags': d.get('tags', None)
        }
        return d
--git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualnetwork.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualnetwork.py new file mode 100644 index 000000000..0ecd93098 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_devtestlabvirtualnetwork.py @@ -0,0 +1,289 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_devtestlabvirtualnetwork +version_added: "0.1.2" +short_description: Manage Azure DevTest Lab Virtual Network instance +description: + - Create, update and delete instance of Azure DevTest Lab Virtual Network. + +options: + resource_group: + description: + - The name of the resource group. + required: True + lab_name: + description: + - The name of the lab. + required: True + name: + description: + - The name of the virtual network. + required: True + location: + description: + - The location of the resource. + description: + description: + - The description of the virtual network. + state: + description: + - Assert the state of the Virtual Network. + - Use C(present) to create or update an Virtual Network and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) Virtual Network + azure_rm_devtestlabvirtualnetwork: + resource_group: myResourceGroup + lab_name: mylab + name: myvn + description: My Lab Virtual Network +''' + +RETURN = ''' +id: + description: + - The identifier of the resource. 
import time
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel

try:
    from azure.core.polling import LROPoller
    from azure.core.exceptions import ResourceNotFoundError
    from msrestazure.azure_operation import AzureOperationPoller
    from azure.mgmt.devtestlabs import DevTestLabsClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass


class Actions:
    """Enumeration of the actions the module may take against the lab network."""
    NoAction, Create, Update, Delete = range(4)


class AzureRMDevTestLabVirtualNetwork(AzureRMModuleBase):
    """Create, update or delete an Azure DevTest Lab virtual network."""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            lab_name=dict(type='str', required=True),
            name=dict(type='str', required=True),
            location=dict(type='str'),
            description=dict(type='str'),
            state=dict(type='str', default='present', choices=['present', 'absent'])
        )

        self.resource_group = None
        self.lab_name = None
        self.name = None
        # Arguments that are not module attributes are collected here and
        # become the virtual-network payload sent to the service.
        self.virtual_network = {}

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMDevTestLabVirtualNetwork, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Module attributes are set directly; everything else feeds the payload.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.virtual_network[key] = kwargs[key]

        result = None

        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager,
                                                    is_track2=True,
                                                    api_version='2018-10-15')

        resource_group = self.get_resource_group(self.resource_group)
        if self.virtual_network.get('location') is None:
            # Default the network's location to the resource group's location.
            self.virtual_network['location'] = resource_group.location

        # subnet overrides for virtual network and subnet created by default
        subnet_name = self.name + "Subnet"
        id_template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
        subnet_id = id_template.format(self.subscription_id,
                                       self.resource_group,
                                       self.name,
                                       subnet_name)
        self.virtual_network['subnet_overrides'] = [{
            'resource_id': subnet_id,
            'lab_subnet_name': subnet_name,
            'use_in_vm_creation_permission': 'Allow',
            'use_public_ip_address_permission': 'Allow'
        }]

        existing = self.get_virtualnetwork()

        if not existing:
            self.log("Virtual Network instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Virtual Network instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                # Only the description is compared for drift.
                wanted = self.virtual_network.get('description')
                if wanted is not None and wanted != existing.get('description'):
                    self.to_do = Actions.Update

        if self.to_do in (Actions.Create, Actions.Update):
            self.log("Need to Create / Update the Virtual Network instance")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            result = self.create_update_virtualnetwork()
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Virtual Network instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_virtualnetwork()
            # This currently doesn't work as there is a bug in SDK / Service
            if isinstance(result, (LROPoller, AzureOperationPoller)):
                result = self.get_poller_result(result)
        else:
            self.log("Virtual Network instance unchanged")
            self.results['changed'] = False
            result = existing

        if self.state == 'present':
            self.results.update({
                'id': result.get('id', None),
                'external_provider_resource_id': result.get('external_provider_resource_id', None)
            })
        return self.results

    def create_update_virtualnetwork(self):
        '''
        Creates or updates Virtual Network with the specified configuration.

        :return: deserialized Virtual Network instance state dictionary
        '''
        self.log("Creating / Updating the Virtual Network instance {0}".format(self.name))

        try:
            poller = self.mgmt_client.virtual_networks.begin_create_or_update(resource_group_name=self.resource_group,
                                                                              lab_name=self.lab_name,
                                                                              name=self.name,
                                                                              virtual_network=self.virtual_network)
            if isinstance(poller, (LROPoller, AzureOperationPoller)):
                poller = self.get_poller_result(poller)
        except Exception as exc:
            self.log('Error attempting to create the Virtual Network instance.')
            self.fail("Error creating the Virtual Network instance: {0}".format(str(exc)))
        return poller.as_dict()

    def delete_virtualnetwork(self):
        '''
        Deletes specified Virtual Network instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Virtual Network instance {0}".format(self.name))
        try:
            self.mgmt_client.virtual_networks.begin_delete(resource_group_name=self.resource_group,
                                                           lab_name=self.lab_name,
                                                           name=self.name)
        except Exception as e:
            self.log('Error attempting to delete the Virtual Network instance.')
            self.fail("Error deleting the Virtual Network instance: {0}".format(str(e)))

        return True

    def get_virtualnetwork(self):
        '''
        Gets the properties of the specified Virtual Network.

        :return: deserialized Virtual Network instance state dictionary, or False when absent
        '''
        self.log("Checking if the Virtual Network instance {0} is present".format(self.name))
        try:
            response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
                                                             lab_name=self.lab_name,
                                                             name=self.name)
        except ResourceNotFoundError:
            self.log('Did not find the Virtual Network instance.')
            return False
        self.log("Response : {0}".format(response))
        self.log("Virtual Network instance : {0} found".format(response.name))
        return response.as_dict()


def main():
    """Main execution"""
    AzureRMDevTestLabVirtualNetwork()


if __name__ == '__main__':
    main()
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.core.exceptions import ResourceNotFoundError
    from azure.mgmt.devtestlabs import DevTestLabsClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMDevTestLabVirtualNetworkInfo(AzureRMModuleBase):
    """Gather facts for one virtual network of a DevTest Lab, or list them all."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.lab_name = None
        self.name = None
        super(AzureRMDevTestLabVirtualNetworkInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to get() when a name is given, otherwise list()."""
        is_old_facts = self.module._name == 'azure_rm_devtestlabvirtualnetwork_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_devtestlabvirtualnetwork_facts' module has been renamed to 'azure_rm_devtestlabvirtualnetwork_info'",
                                  version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    is_track2=True,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.name:
            self.results['virtualnetworks'] = self.get()
        else:
            self.results['virtualnetworks'] = self.list()

        return self.results

    def list(self):
        """Return formatted facts for every virtual network in the lab."""
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_networks.list(resource_group_name=self.resource_group,
                                                              lab_name=self.lab_name)
            self.log("Response : {0}".format(response))
        except Exception as e:
            # Include the underlying error so failures are diagnosable.
            self.fail('Could not list Virtual Networks for DevTest Lab: {0}'.format(str(e)))

        if response is not None:
            for item in response:
                results.append(self.format_response(item))

        return results

    def get(self):
        """Return formatted facts for the named virtual network (single-item list)."""
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
                                                             lab_name=self.lab_name,
                                                             name=self.name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            # Include the underlying error so failures are diagnosable.
            self.fail('Could not get facts for Virtual Network: {0}'.format(str(e)))

        if response:
            results.append(self.format_response(response))

        return results

    def format_response(self, item):
        """Flatten an SDK VirtualNetwork model into the documented facts dict."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'lab_name': self.lab_name,
            'name': d.get('name', None),
            'id': d.get('id', None),
            'external_provider_resource_id': d.get('external_provider_resource_id', None),
            'provisioning_state': d.get('provisioning_state', None),
            'description': d.get('description', None)
        }
        return d


def main():
    AzureRMDevTestLabVirtualNetworkInfo()


if __name__ == '__main__':
    main()
from ansible.module_utils.basic import _load_params
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE, \
    format_resource_id, normalize_location_name

try:
    from azure.core.polling import LROPoller
    from azure.core.exceptions import ResourceNotFoundError
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMDiskEncryptionSet(AzureRMModuleBase):
    """Create, update or delete an Azure disk encryption set."""

    def __init__(self):

        _load_params()
        # define user inputs from playbook
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            location=dict(type='str'),
            source_vault=dict(type='str'),
            key_url=dict(type='str', no_log=True),
            state=dict(choices=['present', 'absent'], default='present', type='str')
        )

        # source_vault and key_url are mandatory when creating or updating
        required_if = [
            ('state', 'present', ['source_vault', 'key_url'])
        ]

        self.results = dict(
            changed=False,
            state=dict()
        )

        self.resource_group = None
        self.name = None
        self.location = None
        self.source_vault = None
        self.key_url = None
        self.state = None
        self.tags = None

        super(AzureRMDiskEncryptionSet, self).__init__(self.module_arg_spec,
                                                       required_if=required_if,
                                                       supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested state against Azure and return changed/state."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        changed = False
        results = dict()

        # retrieve resource group to make sure it exists
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        self.location = normalize_location_name(self.location)

        if self.source_vault:
            # Accept a bare vault name or a full resource id and normalize it
            # to a fully-qualified Key Vault resource id.
            source_vault = self.parse_resource_to_dict(self.source_vault)
            self.source_vault = format_resource_id(val=source_vault['name'],
                                                   subscription_id=source_vault['subscription_id'],
                                                   namespace='Microsoft.KeyVault',
                                                   types='vaults',
                                                   resource_group=source_vault['resource_group'])

        try:
            self.log('Fetching Disk encryption set {0}'.format(self.name))
            disk_encryption_set_old = self.compute_client.disk_encryption_sets.get(self.resource_group,
                                                                                   self.name)
            # serialize object into a dictionary
            results = self.diskencryptionset_to_dict(disk_encryption_set_old)
            if self.state == 'present':
                changed = False
                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True
                    self.tags = results['tags']
                # Guard the nested lookups: a set returned without an
                # active_key would otherwise raise KeyError here.
                active_key = results.get('active_key') or {}
                current_vault_id = (active_key.get('source_vault') or {}).get('id')
                if self.source_vault != current_vault_id:
                    changed = True
                    results.setdefault('active_key', {}).setdefault('source_vault', {})['id'] = self.source_vault
                if self.key_url != active_key.get('key_url'):
                    changed = True
                    results.setdefault('active_key', {})['key_url'] = self.key_url
            elif self.state == 'absent':
                changed = True

        except ResourceNotFoundError:
            # Not found: create for present, nothing to delete for absent.
            if self.state == 'present':
                changed = True
            else:
                changed = False

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                identity = self.compute_models.EncryptionSetIdentity(type="SystemAssigned")
                # create or update disk encryption set
                disk_encryption_set_new = \
                    self.compute_models.DiskEncryptionSet(location=self.location,
                                                          identity=identity)
                if self.source_vault:
                    source_vault = self.compute_models.SourceVault(id=self.source_vault)
                    disk_encryption_set_new.active_key = \
                        self.compute_models.KeyVaultAndKeyReference(source_vault=source_vault,
                                                                    key_url=self.key_url)
                if self.tags:
                    disk_encryption_set_new.tags = self.tags
                self.results['state'] = self.create_or_update_diskencryptionset(disk_encryption_set_new)

            elif self.state == 'absent':
                # delete disk encryption set
                self.delete_diskencryptionset()
                self.results['state'] = 'Deleted'

        return self.results

    def create_or_update_diskencryptionset(self, disk_encryption_set):
        """Create or update the set in Azure and return its serialized state."""
        try:
            # create the disk encryption set
            response = \
                self.compute_client.disk_encryption_sets.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                disk_encryption_set_name=self.name,
                                                                                disk_encryption_set=disk_encryption_set)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error creating or updating disk encryption set {0} - {1}".format(self.name, str(exc)))
        return self.diskencryptionset_to_dict(response)

    def delete_diskencryptionset(self):
        """Delete the set in Azure; fails the module on any error."""
        try:
            # delete the disk encryption set
            response = self.compute_client.disk_encryption_sets.begin_delete(resource_group_name=self.resource_group,
                                                                             disk_encryption_set_name=self.name)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error deleting disk encryption set {0} - {1}".format(self.name, str(exc)))
        return response

    def diskencryptionset_to_dict(self, diskencryptionset):
        """Serialize an SDK model to a dict, keeping the raw tags mapping."""
        result = diskencryptionset.as_dict()
        result['tags'] = diskencryptionset.tags
        return result


def main():
    AzureRMDiskEncryptionSet()


if __name__ == '__main__':
    main()
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.core.exceptions import ResourceNotFoundError
except Exception:
    # This is handled in azure_rm_common
    pass

AZURE_OBJECT_CLASS = 'DiskEncryptionSet'


class AzureRMDiskEncryptionSetInfo(AzureRMModuleBase):
    """Gather facts for one, a resource group's, or all disk encryption sets."""

    def __init__(self):

        # define user inputs into argument
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list', elements='str')
        )

        # store the results of the module operation
        self.results = dict(
            changed=False
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMDiskEncryptionSetInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Select the narrowest listing (name > resource group > subscription)."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        results = []
        # list the conditions and results to return based on user input
        if self.name is not None:
            # if there is set name provided, return facts about that specific disk encryption set
            results = self.get_item()
        elif self.resource_group:
            # all the disk encryption sets listed in specific resource group
            results = self.list_resource_group()
        else:
            # all the disk encryption sets in a subscription
            results = self.list_items()

        self.results['diskencryptionsets'] = self.curated_items(results)

        return self.results

    def get_item(self):
        """Return the named set (as a one-item list) if it exists and matches tags."""
        self.log('Get properties for {0}'.format(self.name))
        item = None
        results = []
        # get specific disk encryption set
        try:
            item = self.compute_client.disk_encryption_sets.get(self.resource_group, self.name)
        except ResourceNotFoundError:
            # Info module: a missing resource is an empty result, not a failure.
            pass

        # serialize result
        if item and self.has_tags(item.tags, self.tags):
            results = [item]
        return results

    def list_resource_group(self):
        """Return all sets in the resource group that match the tag filter."""
        self.log('List all disk encryption sets for resource group - {0}'.format(self.resource_group))
        try:
            response = self.compute_client.disk_encryption_sets.list_by_resource_group(self.resource_group)
        except ResourceNotFoundError as exc:
            self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item)
        return results

    def list_items(self):
        """Return all sets in the subscription that match the tag filter."""
        self.log('List all disk encryption sets for a subscription ')
        try:
            response = self.compute_client.disk_encryption_sets.list()
        except ResourceNotFoundError as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item)
        return results

    def curated_items(self, raws):
        """Map raw SDK models to the documented result dicts."""
        return [self.diskencryptionset_to_dict(item) for item in raws] if raws else []

    def diskencryptionset_to_dict(self, diskencryptionset):
        """Flatten an SDK DiskEncryptionSet model into the module's result dict."""
        result = dict(
            id=diskencryptionset.id,
            name=diskencryptionset.name,
            location=diskencryptionset.location,
            tags=diskencryptionset.tags,
            # active_key / identity may be unset on a partially provisioned
            # set; guard so the info module does not crash on AttributeError.
            active_key=diskencryptionset.active_key.as_dict() if diskencryptionset.active_key else None,
            provisioning_state=diskencryptionset.provisioning_state,
            identity=diskencryptionset.identity.as_dict() if diskencryptionset.identity else None,
            type=diskencryptionset.type
        )
        return result


def main():
    AzureRMDiskEncryptionSetInfo()


if __name__ == '__main__':
    main()
delete and update DNS record sets and records + +description: + - Creates, deletes, and updates DNS records sets and records within an existing Azure DNS Zone. + +options: + resource_group: + description: + - Name of resource group. + required: true + type: str + zone_name: + description: + - Name of the existing DNS zone in which to manage the record set. + required: true + type: str + relative_name: + description: + - Relative name of the record set. + required: true + type: str + record_type: + description: + - The type of record set to create or delete. + choices: + - A + - AAAA + - CNAME + - MX + - NS + - SRV + - TXT + - PTR + - CAA + - SOA + required: true + type: str + record_mode: + description: + - Whether existing record values not sent to the module should be purged. + default: purge + type: str + choices: + - append + - purge + state: + description: + - Assert the state of the record set. Use C(present) to create or update and C(absent) to delete. + default: present + type: str + choices: + - absent + - present + metadata: + description: + - The metadata tags for the record sets. + type: dict + append_metadata: + description: Whether metadata should be appended or not + type: bool + default: True + time_to_live: + description: + - Time to live of the record set in seconds. + default: 3600 + type: int + records: + description: + - List of records to be created depending on the type of record (set). + type: list + elements: dict + suboptions: + preference: + description: + - Used for creating an C(MX) record set/records. + priority: + description: + - Used for creating an C(SRV) record set/records. + weight: + description: + - Used for creating an C(SRV) record set/records. + port: + description: + - Used for creating an C(SRV) record set/records. + entry: + description: + - Primary data value for all record types. 
+ +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Obezimnaka Boms (@ozboms) + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = ''' + +- name: ensure an "A" record set with multiple records + azure_rm_dnsrecordset: + resource_group: myResourceGroup + relative_name: www + zone_name: testing.com + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + +- name: delete a record set + azure_rm_dnsrecordset: + resource_group: myResourceGroup + record_type: A + relative_name: www + zone_name: testing.com + state: absent + +- name: create A record set with metadata information + azure_rm_dnsrecordset: + resource_group: myResourceGroup + relative_name: www + zone_name: zone1.com + record_type: A + records: + - entry: 192.168.100.104 + metadata: + key1: "value1" + +- name: create multiple "A" record sets with multiple records + azure_rm_dnsrecordset: + resource_group: myResourceGroup + zone_name: testing.com + relative_name: "{{ item.name }}" + record_type: "{{ item.type }}" + records: "{{ item.records }}" + with_items: + - { name: 'servera', type: 'A', records: [ { entry: '10.10.10.20' }, { entry: '10.10.10.21' }] } + - { name: 'serverb', type: 'A', records: [ { entry: '10.10.10.30' }, { entry: '10.10.10.41' }] } + - { name: 'serverc', type: 'A', records: [ { entry: '10.10.10.40' }, { entry: '10.10.10.41' }] } + +- name: create SRV records in a new record set + azure_rm_dnsrecordset: + resource_group: myResourceGroup + relative_name: _sip._tcp.testing.com + zone_name: testing.com + time_to_live: 7200 + record_type: SRV + records: + - entry: sip.testing.com + preference: 10 + priority: 20 + weight: 10 + port: 5060 + +- name: create PTR record in a new record set + azure_rm_dnsrecordset: + resource_group: myResourceGroup + relative_name: 192.168.100.101.in-addr.arpa + zone_name: testing.com + record_type: PTR + records: + - entry: servera.testing.com + +- name: 
create TXT record in a new record set + azure_rm_dnsrecordset: + resource_group: myResourceGroup + relative_name: mail.testing.com + zone_name: testing.com + record_type: TXT + records: + - entry: 'v=spf1 a -all' + +- name: Update SOA record + azure_rm_dnsrecordset: + resource_group: myResourceGroup + relative_name: "@" + zone_name: testing.com + record_type: SOA + records: + - host: ns1-99.example.com. + email: azuredns-hostmaster99.example.com + serial_number: 99 + refresh_time: 3699 + retry_time: 399 + expire_time: 2419299 + minimum_ttl: 399 + +''' + +RETURN = ''' +state: + description: + - Current state of the DNS record set. + returned: always + type: complex + contains: + id: + description: + - The DNS record set ID. + returned: always + type: str + sample: "/subscriptions/xxxx......xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/dnszones/b57dc95985712e4523282.com/A/www" + name: + description: + - Relate name of the record set. + returned: always + type: str + sample: 'www' + fqdn: + description: + - Fully qualified domain name of the record set. + returned: always + type: str + sample: www.b57dc95985712e4523282.com + etag: + description: + - The etag of the record set. + returned: always + type: str + sample: 692c3e92-a618-46fc-aecd-8f888807cd6c + provisioning_state: + description: + - The DNS record set state. + returned: always + type: str + sample: Succeeded + target_resource: + description: + - The target resource of the record set. + returned: always + type: dict + sample: {} + ttl: + description: + - The TTL(time-to-live) of the records in the records set. + returned: always + type: int + sample: 3600 + type: + description: + - The type of DNS record in this record set. + returned: always + type: str + sample: A + arecords: + description: + - A list of records in the record set. 
+ returned: always + type: list + sample: [ + { + "ipv4_address": "192.0.2.2" + }, + { + "ipv4_address": "192.0.2.4" + }, + { + "ipv4_address": "192.0.2.8" + } + ] +''' + +import inspect +import sys +import copy + +from ansible.module_utils.basic import _load_params +from ansible.module_utils.six import iteritems +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +RECORD_ARGSPECS = dict( + A=dict( + ipv4_address=dict(type='str', required=True, aliases=['entry']) + ), + AAAA=dict( + ipv6_address=dict(type='str', required=True, aliases=['entry']) + ), + CNAME=dict( + cname=dict(type='str', required=True, aliases=['entry']) + ), + MX=dict( + preference=dict(type='int', required=True), + exchange=dict(type='str', required=True, aliases=['entry']) + ), + NS=dict( + nsdname=dict(type='str', required=True, aliases=['entry']) + ), + PTR=dict( + ptrdname=dict(type='str', required=True, aliases=['entry']) + ), + SRV=dict( + priority=dict(type='int', required=True), + port=dict(type='int', required=True), + weight=dict(type='int', required=True), + target=dict(type='str', required=True, aliases=['entry']) + ), + TXT=dict( + value=dict(type='list', required=True, aliases=['entry']) + ), + SOA=dict( + host=dict(type='str', aliases=['entry']), + email=dict(type='str'), + serial_number=dict(type='int'), + refresh_time=dict(type='int'), + retry_time=dict(type='int'), + expire_time=dict(type='int'), + minimum_ttl=dict(type='int') + ), + CAA=dict( + value=dict(type='str', aliases=['entry']), + flags=dict(type='int'), + tag=dict(type='str') + ) + # FUTURE: ensure all record types are supported (see https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-dns/azure/mgmt/dns/models) +) + +RECORDSET_VALUE_MAP = dict( + A=dict(attrname='a_records', classobj='ARecord', 
is_list=True), + AAAA=dict(attrname='aaaa_records', classobj='AaaaRecord', is_list=True), + CNAME=dict(attrname='cname_record', classobj='CnameRecord', is_list=False), + MX=dict(attrname='mx_records', classobj='MxRecord', is_list=True), + NS=dict(attrname='ns_records', classobj='NsRecord', is_list=True), + PTR=dict(attrname='ptr_records', classobj='PtrRecord', is_list=True), + SRV=dict(attrname='srv_records', classobj='SrvRecord', is_list=True), + TXT=dict(attrname='txt_records', classobj='TxtRecord', is_list=True), + SOA=dict(attrname='soa_record', classobj='SoaRecord', is_list=False), + CAA=dict(attrname='caa_records', classobj='CaaRecord', is_list=True) + # FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py +) if HAS_AZURE else {} + + +class AzureRMRecordSet(AzureRMModuleBase): + + def __init__(self): + + # we're doing two-pass arg validation, sample and store the args internally to allow this + _load_params() + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + relative_name=dict(type='str', required=True), + zone_name=dict(type='str', required=True), + record_type=dict(choices=RECORD_ARGSPECS.keys(), required=True, type='str'), + record_mode=dict(choices=['append', 'purge'], default='purge'), + state=dict(choices=['present', 'absent'], default='present', type='str'), + time_to_live=dict(type='int', default=3600), + records=dict(type='list', elements='dict'), + metadata=dict(type='dict'), + append_metadata=dict(type='bool', default=True) + ) + + required_if = [ + ('state', 'present', ['records']) + ] + + self.results = dict( + changed=False + ) + + # first-pass arg validation so we can get the record type- skip exec_module + super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True, skip_exec=True) + + # look up the right subspec and metadata + record_subspec = 
RECORD_ARGSPECS.get(self.module.params['record_type']) + + # patch the right record shape onto the argspec + self.module_arg_spec['records']['options'] = record_subspec + + self.resource_group = None + self.relative_name = None + self.zone_name = None + self.record_type = None + self.record_mode = None + self.state = None + self.time_to_live = None + self.records = None + self.metadata = None + + # rerun validation and actually run the module this time + super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec.keys(): + setattr(self, key, kwargs[key]) + + zone = self.dns_client.zones.get(self.resource_group, self.zone_name) + if not zone: + self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name, self.resource_group)) + + try: + self.log('Fetching Record Set {0}'.format(self.relative_name)) + record_set = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type) + self.results['state'] = self.recordset_to_dict(record_set) + except ResourceNotFoundError: + record_set = None + # FUTURE: fail on anything other than ResourceNotFound + + record_type_metadata = RECORDSET_VALUE_MAP.get(self.record_type) + + # FUTURE: implement diff mode + + if self.state == 'present': + # convert the input records to SDK objects + self.input_sdk_records = self.create_sdk_records(self.records, self.record_type) + + if not record_set: + changed = True + else: + # and use it to get the type-specific records + server_records = getattr(record_set, record_type_metadata.get('attrname')) + + # compare the input records to the server records + self.input_sdk_records, changed = self.records_changed(self.input_sdk_records, server_records) + + # also check top-level recordset properties + changed |= record_set.ttl != self.time_to_live + + old_metadata = self.results['state']['metadata'] if 
'metadata' in self.results['state'] else dict() + update_metadata, self.results['state']['metadata'] = self.update_metadata(old_metadata) + if update_metadata: + changed = True + self.metadata = self.results['state']['metadata'] + + self.results['changed'] |= changed + + elif self.state == 'absent': + if record_set: + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.results['changed']: + if self.state == 'present': + record_set_args = dict( + ttl=self.time_to_live + ) + + record_set_args[record_type_metadata['attrname']] = self.input_sdk_records if record_type_metadata['is_list'] else self.input_sdk_records[0] + + record_set = self.dns_models.RecordSet(**record_set_args) + if self.metadata: + record_set.metadata = self.metadata + + self.results['state'] = self.create_or_update(record_set) + + elif self.state == 'absent': + # delete record set + self.delete_record_set() + + return self.results + + def create_or_update(self, record_set): + try: + record_set = self.dns_client.record_sets.create_or_update(resource_group_name=self.resource_group, + zone_name=self.zone_name, + relative_record_set_name=self.relative_name, + record_type=self.record_type, + parameters=record_set) + return self.recordset_to_dict(record_set) + except Exception as exc: + self.fail("Error creating or updating dns record {0} - {1}".format(self.relative_name, exc.message or str(exc))) + + def delete_record_set(self): + try: + # delete the record set + self.dns_client.record_sets.delete(resource_group_name=self.resource_group, + zone_name=self.zone_name, + relative_record_set_name=self.relative_name, + record_type=self.record_type) + except Exception as exc: + self.fail("Error deleting record set {0} - {1}".format(self.relative_name, exc.message or str(exc))) + return None + + def create_sdk_records(self, input_records, record_type): + record = RECORDSET_VALUE_MAP.get(record_type) + if not record: + self.fail('record type {0} is not supported 
now'.format(record_type)) + record_sdk_class = getattr(self.dns_models, record.get('classobj')) + return [record_sdk_class(**x) for x in input_records] + + def records_changed(self, input_records, server_records): + # ensure we're always comparing a list, even for the single-valued types + if not isinstance(server_records, list): + server_records = [server_records] + + input_set = set([self.module.jsonify(x.as_dict()) for x in input_records]) + server_set = set([self.module.jsonify(x.as_dict()) for x in server_records]) + + if self.record_mode == 'append': # only a difference if the server set is missing something from the input set + input_set = server_set.union(input_set) + + # non-append mode; any difference in the sets is a change + changed = input_set != server_set + + records = [self.module.from_json(x) for x in input_set] + return self.create_sdk_records(records, self.record_type), changed + + def recordset_to_dict(self, recordset): + result = recordset.as_dict() + result['type'] = result['type'].strip('Microsoft.Network/dnszones/') + return result + + def update_metadata(self, metadata): + metadata = metadata or dict() + new_metadata = copy.copy(metadata) if isinstance(metadata, dict) else dict() + param_metadata = self.metadata if isinstance(self.metadata, dict) else dict() + append_metadata = self.append_metadata if self.metadata is not None else True + changed = False + # check add or update metadata + for key, value in param_metadata.items(): + if not new_metadata.get(key) or new_metadata[key] != value: + changed = True + new_metadata[key] = value + # check remove + if not append_metadata: + for key, value in metadata.items(): + if not param_metadata.get(key): + new_metadata.pop(key) + changed = True + return changed, new_metadata + + +def main(): + AzureRMRecordSet() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnsrecordset_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnsrecordset_info.py new file mode 100644 index 000000000..e46936418 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnsrecordset_info.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Obezimnaka Boms, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_dnsrecordset_info + +version_added: "0.1.2" + +short_description: Get DNS Record Set facts + +description: + - Get facts for a specific DNS Record Set in a Zone, or a specific type in all Zones or in one Zone etc. + +options: + relative_name: + description: + - Only show results for a Record Set. + type: str + resource_group: + description: + - Limit results by resource group. Required when filtering by name or type. + type: str + zone_name: + description: + - Limit results by zones. Required when filtering by name or type. + type: str + record_type: + description: + - Limit record sets by record type. + type: str + top: + description: + - Limit the maximum number of record sets to return. + type: int + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Ozi Boms (@ozboms) + +''' + +EXAMPLES = ''' +- name: Get facts for one Record Set + azure_rm_dnsrecordset_info: + resource_group: myResourceGroup + zone_name: example.com + relative_name: server10 + record_type: A +- name: Get facts for all Type A Record Sets in a Zone + azure_rm_dnsrecordset_info: + resource_group: myResourceGroup + zone_name: example.com + record_type: A +- name: Get all record sets in one zone + azure_rm_dnsrecordset_info: + resource_group: myResourceGroup + zone_name: example.com +''' + +RETURN = ''' +azure_dnsrecordset: + description: + - List of record set dicts. 
+ returned: always + type: list + example: [ + { + "etag": "60ac0480-44dd-4881-a2ed-680d20b3978e", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone.com/A/servera", + "name": "servera", + "properties": { + "ARecords": [ + { + "ipv4Address": "10.4.5.7" + }, + { + "ipv4Address": "2.4.5.8" + } + ], + "TTL": 12900 + }, + "type": "Microsoft.Network/dnszones/A" + }] +dnsrecordsets: + description: + - List of record set dicts, which shares the same hierarchy as M(azure.azcollection.azure_rm_dnsrecordset) module's parameter. + returned: always + type: complex + contains: + id: + description: + - ID of the dns recordset. + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone. + com/A/servera" + relative_name: + description: + - Name of the dns recordset. + type: str + sample: servera + record_type: + description: + - The type of the record set. + - Can be C(A), C(AAAA), C(CNAME), C(MX), C(NS), C(SRV), C(TXT), C(PTR). + type: str + sample: A + time_to_live: + description: + - Time to live of the record set in seconds. + type: int + sample: 12900 + records: + description: + - List of records depending on the type of recordset. + type: dict + sample: [ + { + "ipv4Address": "10.4.5.7" + }, + { + "ipv4Address": "2.4.5.8" + } + ] + provisioning_state: + description: + - Provision state of the resource. + type: str + sample: Successed + fqdn: + description: + - Fully qualified domain name of the record set. 
+ type: str + sample: www.newzone.com +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'RecordSet' + + +RECORDSET_VALUE_MAP = dict( + A='a_records', + AAAA='aaaa_records', + CNAME='cname_record', + MX='mx_records', + NS='ns_records', + PTR='ptr_records', + SRV='srv_records', + TXT='txt_records', + SOA='soa_record', + CAA='caa_records' + # FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py +) + + +class AzureRMRecordSetInfo(AzureRMModuleBase): + + def __init__(self): + + # define user inputs into argument + self.module_arg_spec = dict( + relative_name=dict(type='str'), + resource_group=dict(type='str'), + zone_name=dict(type='str'), + record_type=dict(type='str'), + top=dict(type='int') + ) + + # store the results of the module operation + self.results = dict( + changed=False, + ) + + self.relative_name = None + self.resource_group = None + self.zone_name = None + self.record_type = None + self.top = None + + super(AzureRMRecordSetInfo, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_dnsrecordset_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_dnsrecordset_facts' module has been renamed to 'azure_rm_dnsrecordset_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if not self.top or self.top <= 0: + self.top = None + + # create conditionals to catch errors when calling record facts + if self.relative_name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name or record type.") + if self.relative_name and not self.zone_name: + 
self.fail("Parameter error: DNS Zone required when filtering by name or record type.") + + results = [] + # list the conditions for what to return based on input + if self.relative_name is not None: + # if there is a name listed, they want only facts about that specific Record Set itself + results = self.get_item() + elif self.record_type: + # else, they just want all the record sets of a specific type + results = self.list_type() + elif self.zone_name: + # if there is a zone name listed, then they want all the record sets in a zone + results = self.list_zone() + + if is_old_facts: + self.results['ansible_facts'] = { + 'azure_dnsrecordset': self.serialize_list(results) + } + self.results['dnsrecordsets'] = self.curated_list(results) + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.relative_name)) + item = None + results = [] + + # try to get information for specific Record Set + try: + item = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type) + except ResourceNotFoundError: + results = [] + pass + else: + results = [item] + return results + + def list_type(self): + self.log('Lists the record sets of a specified type in a DNS zone') + try: + response = self.dns_client.record_sets.list_by_type(self.resource_group, self.zone_name, self.record_type, top=self.top) + except Exception as exc: + self.fail("Failed to list for record type {0} - {1}".format(self.record_type, str(exc))) + + results = [] + for item in response: + results.append(item) + return results + + def list_zone(self): + self.log('Lists all record sets in a DNS zone') + try: + response = self.dns_client.record_sets.list_by_dns_zone(self.resource_group, self.zone_name, top=self.top) + except Exception as exc: + self.fail("Failed to list for zone {0} - {1}".format(self.zone_name, str(exc))) + + results = [] + for item in response: + results.append(item) + return results + + def serialize_list(self, raws): + 
return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else [] + + def curated_list(self, raws): + return [self.record_to_dict(item) for item in raws] if raws else [] + + def record_to_dict(self, record): + record_type = record.type[len('Microsoft.Network/dnszones/'):] + records = getattr(record, RECORDSET_VALUE_MAP.get(record_type)) + if records: + if not isinstance(records, list): + records = [records] + else: + records = [] + return dict( + id=record.id, + relative_name=record.name, + record_type=record_type, + records=[x.as_dict() for x in records], + time_to_live=record.ttl, + fqdn=record.fqdn, + provisioning_state=record.provisioning_state, + metadata=record.metadata + ) + + +def main(): + AzureRMRecordSetInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone.py new file mode 100644 index 000000000..f5033be38 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Obezimnaka Boms, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_dnszone + +version_added: "0.1.0" + +short_description: Manage Azure DNS zones + +description: + - Creates and deletes Azure DNS zones. + +options: + resource_group: + description: + - name of resource group. + required: true + name: + description: + - Name of the DNS zone. + required: true + state: + description: + - Assert the state of the zone. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + type: + description: + - The type of this DNS zone (C(public) or C(private)). + choices: + - public + - private + registration_virtual_networks: + description: + - A list of references to virtual networks that register hostnames in this DNS zone. + - This is a only when I(type=private). + - Each element can be the name or resource id, or a dict contains C(name), C(resource_group) information of the virtual network. + type: list + elements: raw + resolution_virtual_networks: + description: + - A list of references to virtual networks that resolve records in this DNS zone. + - This is a only when I(type=private). + - Each element can be the name or resource id, or a dict contains C(name), C(resource_group) information of the virtual network. + type: list + elements: raw + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Obezimnaka Boms (@ozboms) +''' + +EXAMPLES = ''' + +- name: Create a DNS zone + azure_rm_dnszone: + resource_group: myResourceGroup + name: example.com + +- name: Delete a DNS zone + azure_rm_dnszone: + resource_group: myResourceGroup + name: example.com + state: absent + +''' + +RETURN = ''' +state: + description: + - Current state of the zone. 
+ returned: always + type: dict + sample: { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup", + "location": "global", + "name": "Testing", + "name_servers": [ + "ns1-07.azure-dns.com.", + "ns2-07.azure-dns.net.", + "ns3-07.azure-dns.org.", + "ns4-07.azure-dns.info." + ], + "number_of_record_sets": 2, + "type": "private", + "resolution_virtual_networks": ["/subscriptions/XXXX/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/foo"] + } + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils._text import to_native + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDNSZone(AzureRMModuleBase): + + def __init__(self): + + # define user inputs from playbook + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(choices=['present', 'absent'], default='present', type='str'), + type=dict(type='str', choices=['private', 'public']), + registration_virtual_networks=dict(type='list', elements='raw'), + resolution_virtual_networks=dict(type='list', elements='raw') + ) + + # store the results of the module operation + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.tags = None + self.type = None + self.registration_virtual_networks = None + self.resolution_virtual_networks = None + + super(AzureRMDNSZone, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + + # create a new zone variable in case the 'try' doesn't find a zone + zone = None + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + self.registration_virtual_networks = 
self.preprocess_vn_list(self.registration_virtual_networks) + self.resolution_virtual_networks = self.preprocess_vn_list(self.resolution_virtual_networks) + + self.results['check_mode'] = self.check_mode + + # retrieve resource group to make sure it exists + self.get_resource_group(self.resource_group) + + changed = False + results = dict() + + try: + self.log('Fetching DNS zone {0}'.format(self.name)) + zone = self.dns_client.zones.get(self.resource_group, self.name) + + # serialize object into a dictionary + results = zone_to_dict(zone) + + # don't change anything if creating an existing zone, but change if deleting it + if self.state == 'present': + changed = False + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + if self.type and results['type'] != self.type: + changed = True + results['type'] = self.type + if self.resolution_virtual_networks: + if set(self.resolution_virtual_networks) != set(results['resolution_virtual_networks'] or []): + changed = True + results['resolution_virtual_networks'] = self.resolution_virtual_networks + else: + # this property should not be changed + self.resolution_virtual_networks = results['resolution_virtual_networks'] + if self.registration_virtual_networks: + if set(self.registration_virtual_networks) != set(results['registration_virtual_networks'] or []): + changed = True + results['registration_virtual_networks'] = self.registration_virtual_networks + else: + self.registration_virtual_networks = results['registration_virtual_networks'] + elif self.state == 'absent': + changed = True + + except ResourceNotFoundError: + # the zone does not exist so create it + if self.state == 'present': + changed = True + else: + # you can't delete what is not there + changed = False + + self.results['changed'] = changed + self.results['state'] = results + + # return the results if your only gathering information + if self.check_mode: + return self.results + + if changed: + if self.state 
== 'present': + zone = self.dns_models.Zone(zone_type=str.capitalize(self.type) if self.type else None, + tags=self.tags, + location='global') + if self.resolution_virtual_networks: + zone.resolution_virtual_networks = self.construct_subresource_list(self.resolution_virtual_networks) + if self.registration_virtual_networks: + zone.registration_virtual_networks = self.construct_subresource_list(self.registration_virtual_networks) + self.results['state'] = self.create_or_update_zone(zone) + elif self.state == 'absent': + # delete zone + self.delete_zone() + # the delete does not actually return anything. if no exception, then we'll assume + # it worked. + self.results['state']['status'] = 'Deleted' + + return self.results + + def create_or_update_zone(self, zone): + try: + # create or update the new Zone object we created + new_zone = self.dns_client.zones.create_or_update(self.resource_group, self.name, zone) + except Exception as exc: + self.fail("Error creating or updating zone {0} - {1}".format(self.name, exc.message or str(exc))) + return zone_to_dict(new_zone) + + def delete_zone(self): + try: + # delete the Zone + poller = self.dns_client.zones.begin_delete(self.resource_group, self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting zone {0} - {1}".format(self.name, exc.message or str(exc))) + return result + + def preprocess_vn_list(self, vn_list): + return [self.parse_vn_id(x) for x in vn_list] if vn_list else None + + def parse_vn_id(self, vn): + vn_dict = self.parse_resource_to_dict(vn) if not isinstance(vn, dict) else vn + return format_resource_id(val=vn_dict['name'], + subscription_id=vn_dict.get('subscription') or self.subscription_id, + namespace='Microsoft.Network', + types='virtualNetworks', + resource_group=vn_dict.get('resource_group') or self.resource_group) + + def construct_subresource_list(self, raw): + return [self.dns_models.SubResource(id=x) for x in raw] if raw else None + + +def 
zone_to_dict(zone): + # turn Zone object into a dictionary (serialization) + result = dict( + id=zone.id, + name=zone.name, + number_of_record_sets=zone.number_of_record_sets, + name_servers=zone.name_servers, + tags=zone.tags, + type=zone.zone_type.lower(), + registration_virtual_networks=[to_native(x.id) for x in zone.registration_virtual_networks] if zone.registration_virtual_networks else None, + resolution_virtual_networks=[to_native(x.id) for x in zone.resolution_virtual_networks] if zone.resolution_virtual_networks else None + ) + return result + + +def main(): + AzureRMDNSZone() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone_info.py new file mode 100644 index 000000000..8a37a131d --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_dnszone_info.py @@ -0,0 +1,254 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Obezimnaka Boms, +# +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_dnszone_info + +version_added: "0.1.2" + +short_description: Get DNS zone facts + +description: + - Get facts for a specific DNS zone or all DNS zones within a resource group. + +options: + resource_group: + description: + - Limit results by resource group. Required when filtering by name. + name: + description: + - Only show results for a specific zone. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Obezimnaka Boms (@ozboms) + +''' + +EXAMPLES = ''' +- name: Get facts for one zone + azure_rm_dnszone_info: + resource_group: myResourceGroup + name: foobar22 + +- name: Get facts for all zones in a resource group + azure_rm_dnszone_info: + resource_group: myResourceGroup + +- name: Get facts by tags + azure_rm_dnszone_info: + tags: + - testing +''' + +RETURN = ''' +azure_dnszones: + description: + - List of zone dicts. + returned: always + type: list + example: [{ + "etag": "00000002-0000-0000-0dcb-df5776efd201", + "location": "global", + "properties": { + "maxNumberOfRecordSets": 5000, + "numberOfRecordSets": 15 + }, + "tags": {} + }] +dnszones: + description: + - List of zone dicts, which share the same layout as azure_rm_dnszone module parameter. + returned: always + type: list + contains: + id: + description: + - id of the DNS Zone. + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/azure.com" + name: + description: + - name of the DNS zone. + sample: azure.com + type: + description: + - The type of this DNS zone (C(public) or C(private)). + sample: private + registration_virtual_networks: + description: + - A list of references to virtual networks that register hostnames in this DNS zone. + type: list + sample: ["/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/bar"] + resolution_virtual_networks: + description: + - A list of references to virtual networks that resolve records in this DNS zone. + type: list + sample: ["/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/deadbeef"] + number_of_record_sets: + description: + - The current number of record sets in this DNS zone. 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native

try:
    from azure.core.exceptions import ResourceNotFoundError
except Exception:
    # Handled in azure_rm_common.
    pass

AZURE_OBJECT_CLASS = 'DnsZone'


class AzureRMDNSZoneInfo(AzureRMModuleBase):
    """Facts module: list Azure DNS zones by name, resource group, or subscription."""

    def __init__(self):
        # Parameters accepted from the playbook.
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list', elements='str')
        )

        # Result skeleton handed back to Ansible.
        self.results = dict(
            changed=False,
            ansible_info=dict(azure_dnszones=[])
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMDNSZoneInfo, self).__init__(self.module_arg_spec,
                                                 supports_check_mode=True,
                                                 supports_tags=False,
                                                 facts_module=True)

    def exec_module(self, **kwargs):
        """Pick the narrowest query the supplied filters allow and return the facts."""
        if self.module._name == 'azure_rm_dnszone_facts':
            self.module.deprecate("The 'azure_rm_dnszone_facts' module has been renamed to 'azure_rm_dnszone_info'", version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # A zone name is only meaningful inside a resource group.
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name is not None:
            # One specific zone.
            found = self.get_item()
        elif self.resource_group:
            # Every zone in one resource group.
            found = self.list_resource_group()
        else:
            # Every zone in the subscription.
            found = self.list_items()

        self.results['ansible_info']['azure_dnszones'] = self.serialize_items(found)
        self.results['dnszones'] = self.curated_items(found)
        return self.results

    def get_item(self):
        """Fetch a single zone; return [] when it is absent or its tags do not match."""
        self.log('Get properties for {0}'.format(self.name))
        zone = None
        try:
            zone = self.dns_client.zones.get(self.resource_group, self.name)
        except ResourceNotFoundError:
            pass
        return [zone] if zone and self.has_tags(zone.tags, self.tags) else []

    def list_resource_group(self):
        """List the zones of the configured resource group, filtered by tags."""
        self.log('List items for resource group')
        try:
            zones = self.dns_client.zones.list_by_resource_group(self.resource_group)
        except Exception as exc:
            self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
        return [z for z in zones if self.has_tags(z.tags, self.tags)]

    def list_items(self):
        """List every zone in the subscription, filtered by tags."""
        self.log('List all items')
        try:
            zones = self.dns_client.zones.list()
        except Exception as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))
        return [z for z in zones if self.has_tags(z.tags, self.tags)]

    def serialize_items(self, raws):
        """Serialize SDK objects with the base-class serializer (legacy layout)."""
        return [self.serialize_obj(z, AZURE_OBJECT_CLASS) for z in raws] if raws else []

    def curated_items(self, raws):
        """Convert SDK objects into the curated dict layout documented in RETURN."""
        return [self.zone_to_dict(z) for z in raws] if raws else []

    def zone_to_dict(self, zone):
        """Flatten one Zone SDK object into the documented dict shape."""
        reg_nets = zone.registration_virtual_networks
        res_nets = zone.resolution_virtual_networks
        return dict(
            id=zone.id,
            name=zone.name,
            number_of_record_sets=zone.number_of_record_sets,
            max_number_of_record_sets=zone.max_number_of_record_sets,
            name_servers=zone.name_servers,
            tags=zone.tags,
            type=zone.zone_type.lower(),
            registration_virtual_networks=[to_native(net.id) for net in reg_nets] if reg_nets else None,
            resolution_virtual_networks=[to_native(net.id) for net in res_nets] if res_nets else None
        )


def main():
    AzureRMDNSZoneInfo()


if __name__ == '__main__':
    main()
+ default: Active + required: False + type: str + choices: + - Active + - Disabled + - Restoring + - SendDisabled + - ReceiveDisabled + - Creating + - Deleting + - Renaming + - Unknown + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + required: False + type: str + sku: + description: + - The name of the SKU. + - Please see L(https://azure.microsoft.com/en-in/pricing/details/event-hubs/,). + default: Basic + choices: + - Basic + - Standard + type: str + state: + description: + - Assert the state of the Event Hub. + - Use C(present) to create or update an event hub and C(absent) to delete it. + default: present + choices: + - absent + - present + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Praveen Ghuge (@praveenghuge) + - Karl Dasan (@karldas30) + - Saurabh Malpani(@saurabh3796) +''' +EXAMPLES = ''' + +- name: "Create Event Hub" + azure_rm_eventhub: + resource_group: testgroupans + location: eastus + namespace_name: myNamespace + name: myhub + tags: + - a: b + sku: free + +- name: Delete Event Hub + azure_rm_eventhub: + resource_group: testgroupans + name: myNamespace + state: absent + +- name: "Create Event Hub Namespace" + azure_rm_eventhub: + resource_group: testgroupans + location: eastus + namespace_name: myNamespace + tags: + a: b + sku: free + +- name: Delete Event Hub Namespace + azure_rm_eventhub: + resource_group: testgroupans + namespace_name: myNamespace + state: absent + +''' + +RETURN = ''' +state: + description: + - Current state of the Event Hub namesapce or Event Hub. 
+ returned: always + type: dict + sample: { + "additional_properties": {"location": "East US"}, + "critical": false, + "enabled": true, + "metric_id": null, + "name": "testnaedd3d22d3w", + "namespace_type": "eventHub", + "status": "Active", + "region": null, + "scale_unit": null, + "service_bus_endpoint": "https://testnaedd3d22d3w.servicebus.windows.net:443/", + "sku": "Basic", + "tags": { + "a": "b" + }, + "message_retention_in_days": 7, + "partition_count": 4, + "partition_ids": ["0", "1", "2", "3"], + "updated_at": "2021-04-29T10:05:24.000Z", + "created_at": "2021-04-29T10:05:20.377Z", + "type": "Microsoft.eventHubs/namespaces" + } + +''' + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.eventhub.models import Eventhub, EHNamespace + from azure.mgmt.eventhub.models import Sku +except ImportError: + # This is handled in azure_rm_common + pass +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +import time + + +class AzureRMEventHub(AzureRMModuleBase): + + def __init__(self): + # define user inputs from playbook + + self.authorizations_spec = dict( + name=dict(type='str', required=True) + ) + + self.module_arg_spec = dict( + message_retention_in_days=dict(type='int'), + name=dict(type='str'), + namespace_name=dict(type='str', required=True), + partition_count=dict(type='int'), + resource_group=dict(type='str', required=True), + sku=dict(type='str', choices=[ + 'Basic', 'Standard'], default='Basic'), + status=dict(choices=["Active", "Disabled", "Restoring", "SendDisabled", "ReceiveDisabled", "Creating", "Deleting", "Renaming", "Unknown"], + default='Active', type='str'), + state=dict(choices=['present', 'absent'], + default='present', type='str'), + location=dict(type='str') + ) + required_if = [ + ('state', 'present', [ + 'partition_count', 'message_retention_in_days']) + ] + self.sku = None + self.resource_group = None + self.namespace_name = None + 
self.message_retention_in_days = None + self.name = None + self.location = None + self.authorizations = None + self.tags = None + self.status = None + self.partition_count = None + self.results = dict( + changed=False, + state=dict() + ) + self.state = None + + super(AzureRMEventHub, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + # retrieve resource group to make sure it exists + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + results = dict() + changed = False + + try: + self.log( + 'Fetching Event Hub Namespace {0}'.format(self.name)) + namespace = self.event_hub_client.namespaces.get( + self.resource_group, self.namespace_name) + + results = namespace_to_dict(namespace) + event_hub_results = None + if self.name: + self.log('Fetching event Hub {0}'.format(self.name)) + event_hub = self.event_hub_client.event_hubs.get( + self.resource_group, self.namespace_name, self.name) + event_hub_results = event_hub_to_dict( + event_hub) + # don't change anything if creating an existing namespace, but change if deleting it + if self.state == 'present': + changed = False + + update_tags, results['tags'] = self.update_tags( + results['tags']) + + if update_tags: + changed = True + elif self.namespace_name and not self.name: + if self.sku != results['sku']: + changed = True + elif self.namespace_name and self.name and event_hub_results: + if results['sku'] != 'Basic' and self.message_retention_in_days != event_hub_results['message_retention_in_days']: + self.sku = results['sku'] + changed = True + elif self.state == 'absent': + changed = True + + except Exception: + # the event hub does not exist so create it + if self.state == 'present': + changed = True + else: + # you can't delete 
what is not there + changed = False + + self.results['changed'] = changed + + if self.name and not changed: + self.results['state'] = event_hub_results + else: + self.results['state'] = results + + # return the results if your only gathering information + if self.check_mode: + return self.results + + if changed: + if self.state == "present": + if self.name is None: + self.results['state'] = self.create_or_update_namespaces() + elif self.namespace_name and self.name: + self.results['state'] = self.create_or_update_event_hub() + elif self.state == "absent": + # delete Event Hub + if self.name is None: + self.delete_namespace() + elif self.namespace_name and self.name: + self.delete_event_hub() + self.results['state']['status'] = 'Deleted' + return self.results + + def create_or_update_namespaces(self): + ''' + create or update namespaces + ''' + try: + namespace_params = EHNamespace( + location=self.location, + sku=Sku(name=self.sku), + tags=self.tags + ) + result = self.event_hub_client.namespaces.begin_create_or_update( + self.resource_group, + self.namespace_name, + namespace_params) + + namespace = self.event_hub_client.namespaces.get( + self.resource_group, + self.namespace_name) + while namespace.provisioning_state == "Created": + time.sleep(30) + namespace = self.event_hub_client.namespaces.get( + self.resource_group, + self.namespace_name, + ) + except Exception as ex: + self.fail("Failed to create namespace {0} in resource group {1}: {2}".format( + self.namespace_name, self.resource_group, str(ex))) + return namespace_to_dict(namespace) + + def create_or_update_event_hub(self): + ''' + Create or update Event Hub. 
+ :return: create or update Event Hub instance state dictionary + ''' + try: + if self.sku == 'Basic': + self.message_retention_in_days = 1 + params = Eventhub( + message_retention_in_days=self.message_retention_in_days, + partition_count=self.partition_count, + status=self.status + ) + result = self.event_hub_client.event_hubs.create_or_update( + self.resource_group, + self.namespace_name, + self.name, + params) + + self.log("Response : {0}".format(result)) + except Exception as ex: + self.fail("Failed to create event hub {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + return event_hub_to_dict(result) + + def delete_event_hub(self): + ''' + Deletes specified event hub + :return True + ''' + self.log("Deleting the event hub {0}".format(self.name)) + try: + result = self.event_hub_client.event_hubs.delete( + self.resource_group, self.namespace_name, self.name) + except Exception as e: + self.log('Error attempting to delete event hub.') + self.fail( + "Error deleting the event hub : {0}".format(str(e))) + return True + + def delete_namespace(self): + ''' + Deletes specified namespace + :return True + ''' + self.log("Deleting the namespace {0}".format(self.namespace_name)) + try: + result = self.event_hub_client.namespaces.begin_delete( + self.resource_group, self.namespace_name) + except Exception as e: + self.log('Error attempting to delete namespace.') + self.fail( + "Error deleting the namespace : {0}".format(str(e))) + return True + + +def event_hub_to_dict(item): + # turn event hub object into a dictionary (serialization) + event_hub = item.as_dict() + result = dict() + if item.additional_properties: + result['additional_properties'] = item.additional_properties + result['name'] = event_hub.get('name', None) + result['partition_ids'] = event_hub.get('partition_ids', None) + result['created_at'] = event_hub.get('created_at', None) + result['updated_at'] = event_hub.get('updated_at', None) + result['message_retention_in_days'] 
def namespace_to_dict(item):
    """Serialize an Event Hub namespace SDK object into a flat result dict."""
    raw = item.as_dict()
    # Fields copied through verbatim, defaulting to None when the service
    # omitted them from the response.
    passthrough = (
        'name', 'type', 'tags', 'provisioning_state', 'region', 'metric_id',
        'service_bus_endpoint', 'scale_unit', 'enabled', 'critical',
        'data_center', 'namespace_type', 'updated_at', 'created_at',
        'is_auto_inflate_enabled', 'maximum_throughput_units',
    )
    result = {field: raw.get(field, None) for field in passthrough}
    result['additional_properties'] = raw.get('additional_properties', {})
    # Normalize the location, e.g. "East US" -> "eastus".
    result['location'] = raw.get('location', '').replace(' ', '').lower()
    result['sku'] = raw.get('sku').get('name')
    return result
try:
    from azure.core.exceptions import ResourceNotFoundError
except ImportError:
    # This is handled in azure_rm_common
    pass
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase


class AzureRMEventHubInfo(AzureRMModuleBase):
    """Facts module: query Event Hub namespaces and event hubs."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            namespace_name=dict(
                type='str',
                required=False
            ),
            name=dict(
                type='str',
                required=False
            )
        )

        # Store the results of the module operation.
        self.results = dict(
            changed=False)
        self.resource_group = None
        self.namespace_name = None
        self.name = None
        self.tags = None

        super(AzureRMEventHubInfo, self).__init__(
            self.module_arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to the narrowest query the supplied filters support."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # BUGFIX: an event hub name without its namespace previously matched no
        # branch and silently returned an empty result; fail loudly instead.
        if self.name and not self.namespace_name:
            self.fail("Parameter error: namespace_name required when filtering by name.")

        if self.name and self.namespace_name and self.resource_group:
            results = self.get_event_hub()
            self.results['eventhub'] = [
                self.event_hub_to_dict(x) for x in results]
        elif self.namespace_name:
            results = self.get_namespace()
            self.results['namespace'] = [
                self.namespace_to_dict(x) for x in results]
        elif self.name is None and self.namespace_name is None:
            results = self.list_all_namespace()
            self.results['namespaces'] = [
                self.namespace_to_dict(x) for x in results]
        return self.results

    def get_namespace(self):
        '''
        Get the namespace using resource group and namespace name.
        '''
        response = None
        try:
            response = self.event_hub_client.namespaces.get(
                self.resource_group, self.namespace_name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            # BUGFIX: .format() was previously applied to the return value of
            # self.fail() instead of the message string, so the exception
            # detail never reached the error text.
            self.fail('Could not get info for namespace. {0}'.format(str(e)))

        return [response] if response else []

    def get_event_hub(self):
        '''
        Get an event hub using resource_group, namespace_name and name.
        '''
        response = None
        try:
            response = self.event_hub_client.event_hubs.get(
                self.resource_group, self.namespace_name, self.name)
        except ResourceNotFoundError as e:
            # BUGFIX: same misplaced .format() as in get_namespace().
            self.fail('Could not get info for event hub. {0}'.format(str(e)))

        return [response] if response else []

    def list_all_namespace(self):
        '''
        List all namespaces in the configured resource group.
        '''
        self.log('List items for resource group')
        try:
            response = self.event_hub_client.namespaces.list_by_resource_group(
                self.resource_group)
        except Exception as exc:
            self.fail(
                "Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))

        return list(response)

    def namespace_to_dict(self, item):
        """Serialize a namespace SDK object into a flat result dict."""
        namespace = item.as_dict()
        result = dict(
            additional_properties=namespace.get('additional_properties', {}),
            name=namespace.get('name', None),
            type=namespace.get('type', None),
            # Normalize location, e.g. "East US" -> "eastus".
            location=namespace.get('location', '').replace(' ', '').lower(),
            sku=namespace.get("sku").get("name"),
            tags=namespace.get('tags', None),
            provisioning_state=namespace.get('provisioning_state', None),
            region=namespace.get('region', None),
            metric_id=namespace.get('metric_id', None),
            service_bus_endpoint=namespace.get('service_bus_endpoint', None),
            scale_unit=namespace.get('scale_unit', None),
            enabled=namespace.get('enabled', None),
            critical=namespace.get('critical', None),
            data_center=namespace.get('data_center', None),
            namespace_type=namespace.get('namespace_type', None),
            updated_at=namespace.get('updated_at', None),
            created_at=namespace.get('created_at', None),
            is_auto_inflate_enabled=namespace.get('is_auto_inflate_enabled', None),
            maximum_throughput_units=namespace.get('maximum_throughput_units', None)
        )
        return result

    def event_hub_to_dict(self, item):
        """Serialize an event hub SDK object into a flat result dict."""
        event_hub = item.as_dict()
        result = dict()
        if item.additional_properties:
            result['additional_properties'] = item.additional_properties
        result['name'] = event_hub.get('name', None)
        result['partition_ids'] = event_hub.get('partition_ids', None)
        result['created_at'] = event_hub.get('created_at', None)
        result['updated_at'] = event_hub.get('updated_at', None)
        result['message_retention_in_days'] = event_hub.get(
            'message_retention_in_days', None)
        result['partition_count'] = event_hub.get('partition_count', None)
        result['status'] = event_hub.get('status', None)
        result['tags'] = event_hub.get('tags', None)
        return result
result['partition_ids'] = event_hub.get('partition_ids', None) + result['created_at'] = event_hub.get('created_at', None) + result['updated_at'] = event_hub.get('updated_at', None) + result['message_retention_in_days'] = event_hub.get( + 'message_retention_in_days', None) + result['partition_count'] = event_hub.get('partition_count', None) + result['status'] = event_hub.get('status', None) + result['tags'] = event_hub.get('tags', None) + return result + + +def main(): + AzureRMEventHubInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute.py new file mode 100644 index 000000000..93b53dc70 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute.py @@ -0,0 +1,373 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Praveen Ghuge (@praveenghuge), Karl Dasan (@ikarldasan), Sakar Mehra (@sakar97) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_expressroute +version_added: "1.7.0" +short_description: Manage Express Route Circuits +description: + - Create, update and delete instance of Express Route. +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: true + type: str + name: + description: + - Unique name of the app service plan to create or update. + required: true + type: str + location: + description: + - Resource location. If not set, location from the resource group will be used as default. 
+ type: str + service_provider_properties: + type: dict + description: + - The service Provider properties + suboptions: + peering_location: + description: + - The peering location + type: str + bandwidth_in_mbps: + description: + - The bandwidth of the circuit when the circuit is provisioned on an ExpressRoutePort resource. + type: str + service_provider_name: + description: + - Name of service provider + type: str + sku: + description: + - The name of the SKU. + - Please see L(https://azure.microsoft.com/en-in/pricing/details/expressroute/,) + - Required sku when I(state=present). + type: dict + suboptions: + tier: + description: + - The tier of the SKU + type: str + required: true + choices: + - standard + - premium + family: + description: + - the family of the SKU + type: str + required: true + choices: + - metereddata + - unlimiteddata + global_reach_enabled: + description: + - Flag denoting global reach status. + type: bool + authorizations: + description: + - The list of authorizations. + type: list + elements: dict + suboptions: + name: + description: Name of the authorization. + required: true + type: str + allow_classic_operations: + description: + - Support for classic operations. + type: bool + state: + description: + - Assert the state of the express route. + - Use C(present) to create or update an express route and C(absent) to delete it. 
+ type: str + default: present + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Praveen Ghuge (@praveenghuge) + - Karl Dasan (@ikarldasan) + - Sakar Mehra (@sakar97) +''' +EXAMPLES = ''' +- name: "Create Express route" + azure_rm_expressroute: + resource_group: rg + location: eastus + name: exp + allow_classic_operations: true + global_reach_enabled: false + tags: + - a: b + authorizations: + - name: authorization_test + service_provider_properties: + service_provider_name: Aryaka Networks + peering_location: Seattle + bandwidth_in_mbps: '200' + sku: + tier: premium + family: metereddata + +- name: Delete Express route + azure_rm_expressroute: + resource_group: rg + name: exp + state: absent + +''' + +RETURN = ''' +state: + description: + - Current state of the express route. + returned: always + type: dict + + sample: { + "additional_properties": {}, + "allow_classic_operations": true, + "authorizations": [ + { + "authorization_key": "d83e18b5-0200-4e0b-9cdb-6fdf95b00267", + "authorization_use_status": "Available", + "etag": "W/'09572845-c667-410c-b664-ed8e39242c13'", + "id": "/subscriptions/subs_id/resourceGroups/rg/providers/Microsoft.Network/expressRouteCircuits/exp/authorizations/az", + "name": "authorization_test", + "provisioning_state": "Succeeded", + "type": "Microsoft.Network/expressRouteCircuits/authorizations" + } + ], + "bandwidth_in_gbps": null, + "circuit_provisioning_state": "Enabled", + "express_route_port": null, + "gateway_manager_etag": "", + "global_reach_enabled": false, + "id": "/subscriptions/subs_id/resourceGroups/rg/providers/Microsoft.Network/expressRouteCircuits/exp", + "location": "eastus", + "name": "exp", + "peerings": [], + "provisioning_state": "Succeeded", + "service_key": "e1956383-63b6-4709-8baa-3615bbf5d22b", + "service_provider_notes": null, + "service_provider_provisioning_state": "NotProvisioned", + "stag": 27, + "status": 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.core.exceptions import ResourceNotFoundError
    from azure.mgmt.network import NetworkManagementClient
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureExpressRoute(AzureRMModuleBase):
    """Manage (create, update, delete) an Azure Express Route circuit."""

    def __init__(self):
        # Sub-spec: provider details for a provider-based circuit.
        self.service_provider_properties_spec = dict(
            service_provider_name=dict(type='str'),
            peering_location=dict(type='str'),
            bandwidth_in_mbps=dict(type='str')
        )

        # Sub-spec: SKU tier and family; combined into the SDK sku name on create.
        self.sku_spec = dict(
            tier=dict(type='str', choices=[
                'standard', 'premium'], required=True),
            family=dict(type='str', choices=[
                'unlimiteddata', 'metereddata'], required=True)
        )

        # Sub-spec: authorization entries.
        self.authorizations_spec = dict(
            name=dict(type='str', required=True)
        )

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            location=dict(type='str'),
            sku=dict(type='dict', options=self.sku_spec),
            allow_classic_operations=dict(type='bool'),
            authorizations=dict(type='list', options=self.authorizations_spec, elements='dict'),
            state=dict(choices=['present', 'absent'],
                       default='present', type='str'),
            service_provider_properties=dict(
                type='dict', options=self.service_provider_properties_spec),
            global_reach_enabled=dict(type='bool'),
        )

        self.resource_group = None
        self.name = None
        self.location = None
        self.allow_classic_operations = None
        self.authorizations = None
        self.service_provider_properties = None
        self.global_reach_enabled = None
        self.sku = None
        self.tags = None
        self.state = None
        self.results = dict(
            changed=False,
            state=dict()
        )

        super(AzureExpressRoute, self).__init__(self.module_arg_spec,
                                                supports_check_mode=True,
                                                supports_tags=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested circuit state against what exists in Azure."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        # Fails early when the resource group does not exist.
        self.get_resource_group(self.resource_group)

        current = dict()
        changed = False

        try:
            self.log('Fetching Express Route Circuits {0}'.format(self.name))
            circuit = self.network_client.express_route_circuits.get(
                self.resource_group, self.name)
            current = express_route_to_dict(circuit)

            if self.state == 'present':
                # For an existing circuit only tag drift is detected.
                update_tags, current['tags'] = self.update_tags(current['tags'])
                changed = bool(update_tags)
            elif self.state == 'absent':
                changed = True
        except ResourceNotFoundError:
            # Circuit is absent in Azure: create it when requested,
            # otherwise there is nothing to delete.
            changed = self.state == 'present'

        self.results['changed'] = changed
        self.results['state'] = current

        # Return the predicted state without touching Azure in check mode.
        if self.check_mode:
            return self.results

        if changed:
            if self.state == "present":
                self.results['state'] = self.create_or_update_express_route(
                    self.module.params)
            elif self.state == "absent":
                # Delete express route.
                self.delete_expressroute()
                self.results['state']['status'] = 'Deleted'

        return self.results
+ :return: create or update Express route instance state dictionary + ''' + self.log("create or update Express Route {0}".format(self.name)) + try: + params["sku"]["name"] = params.get("sku").get("tier") + "_" + params.get("sku").get("family") + poller = self.network_client.express_route_circuits.begin_create_or_update( + resource_group_name=params.get("resource_group"), + circuit_name=params.get("name"), + parameters=params) + result = self.get_poller_result(poller) + self.log("Response : {0}".format(result)) + except Exception as ex: + self.fail("Failed to create express route {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + return express_route_to_dict(result) + + def delete_expressroute(self): + ''' + Deletes specified express route circuit + :return True + ''' + self.log("Deleting the express route {0}".format(self.name)) + try: + poller = self.network_client.express_route_circuits.begin_delete( + self.resource_group, self.name) + result = self.get_poller_result(poller) + except Exception as e: + self.log('Error attempting to delete express route.') + self.fail( + "Error deleting the express route : {0}".format(str(e))) + return result + + +def express_route_to_dict(item): + # turn express route object into a dictionary (serialization) + express_route = item.as_dict() + result = dict( + additional_properties=express_route.get('additional_properties', {}), + id=express_route.get('id', None), + name=express_route.get('name', None), + type=express_route.get('type', None), + location=express_route.get('location', '').replace(' ', '').lower(), + tags=express_route.get('tags', None), + allow_classic_operations=express_route.get( + 'allow_classic_operations', None), + circuit_provisioning_state=express_route.get( + 'circuit_provisioning_state', None), + service_provider_provisioning_state=express_route.get( + 'service_provider_provisioning_state', None), + authorizations=express_route.get('authorizations', []), + 
peerings=express_route.get('peerings', []), + service_key=express_route.get('service_key', None), + service_provider_notes=express_route.get( + 'service_provider_notes', None), + express_route_port=express_route.get('express_route_port', None), + bandwidth_in_gbps=express_route.get('bandwidth_in_gbps', None), + stag=express_route.get('stag', None), + provisioning_state=express_route.get('provisioning_state', None), + gateway_manager_etag=express_route.get('gateway_manager_etag', ''), + global_reach_enabled=express_route.get('global_reach_enabled', '') + ) + return result + + +def main(): + AzureExpressRoute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute_info.py new file mode 100644 index 000000000..fead26069 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_expressroute_info.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Praveen Ghuge (@praveenghuge), Karl Dasan (@ikarldasan), Sakar Mehra (@sakar97) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_expressroute_info +version_added: "1.7.0" +short_description: Get Azure Express Route +description: + - Get facts of Azure Express Route. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + name: + description: + - The name of the express route. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Praveen Ghuge (@praveenghuge) + - Karl Dasan (@ikarldasan) + - Sakar Mehra (@sakar97) + +''' + + +EXAMPLES = ''' + - name: Get facts of specific expressroute + community.azure.azure_rm_expressroute_info: + resource_group: myResourceGroup + name: myExpressRoute + tags: + - key:value + +''' + +RETURN = ''' +state: + description: + - Current state of the express route. + returned: always + type: dict + + sample: { + "additional_properties": {}, + "allow_classic_operations": true, + "authorizations": [ + { + "authorization_key": "d83e18b5-0200-4e0b-9cdb-6fdf95b00267", + "authorization_use_status": "Available", + "etag": "W/'09572845-c667-410c-b664-ed8e39242c13'", + "id": "/subscriptions/subs_id/resourceGroups/rg/providers/Microsoft.Network/expressRouteCircuits/exp/authorizations/az", + "name": "authorization_test", + "provisioning_state": "Succeeded", + "type": "Microsoft.Network/expressRouteCircuits/authorizations" + } + ], + "bandwidth_in_gbps": null, + "circuit_provisioning_state": "Enabled", + "express_route_port": null, + "gateway_manager_etag": "", + "global_reach_enabled": false, + "id": "/subscriptions/subs_id/resourceGroups/rg/providers/Microsoft.Network/expressRouteCircuits/exp", + "location": "eastus", + "name": "exp", + "peerings": [], + "provisioning_state": "Succeeded", + "service_key": "e1956383-63b6-4709-8baa-3615bbf5d22b", + "service_provider_notes": null, + "service_provider_provisioning_state": "NotProvisioned", + "stag": 27, + "status": "Deleted", + "tags": { + "a": "b" + }, + "type": "Microsoft.Network/expressRouteCircuits" + } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.network import NetworkManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled 
in azure_rm_common + pass + + +class AzureExpressRouteInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False) + self.resource_group = None + self.name = None + self.tags = None + + super(AzureExpressRouteInfo, self).__init__( + self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + results = self.get() + elif self.resource_group: + # all the express route listed in that specific resource group + results = self.list_resource_group() + + self.results['expressroute'] = [ + self.express_route_to_dict(x) for x in results] + return self.results + + def get(self): + response = None + results = [] + try: + response = self.network_client.express_route_circuits.get( + self.resource_group, self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.fail('Could not get info for express route. 
{0}').format(str(e)) + + if response and self.has_tags(response.tags, self.tags): + results = [response] + return results + + def list_resource_group(self): + self.log('List items for resource group') + try: + response = self.network_client.express_route_circuits.list( + self.resource_group) + + except ResourceNotFoundError as exc: + self.fail( + "Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def express_route_to_dict(self, item): + # turn express route object into a dictionary (serialization) + express_route = item.as_dict() + result = dict( + additional_properties=express_route.get('additional_properties', {}), + id=express_route.get('id', None), + name=express_route.get('name', None), + type=express_route.get('type', None), + location=express_route.get('location', '').replace(' ', '').lower(), + tags=express_route.get('tags', None), + allow_classic_operations=express_route.get('allow_classic_operations', None), + circuit_provisioning_state=express_route.get( + 'circuit_provisioning_state', None), + service_provider_provisioning_state=express_route.get( + 'service_provider_provisioning_state', None), + authorizations=express_route.get('authorizations', []), + peerings=express_route.get('peerings', []), + service_key=express_route.get('service_key', None), + service_provider_notes=express_route.get('service_provider_notes', None), + express_route_port=express_route.get('express_route_port', None), + bandwidth_in_gbps=express_route.get('bandwidth_in_gbps', None), + stag=express_route.get('stag', None), + provisioning_state=express_route.get('provisioning_state', None), + gateway_manager_etag=express_route.get('gateway_manager_etag', ''), + global_reach_enabled=express_route.get('global_reach_enabled', '') + ) + return result + + +def main(): + AzureExpressRouteInfo() + + +if __name__ == '__main__': + 
main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy.py new file mode 100644 index 000000000..e7dfdafe2 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy.py @@ -0,0 +1,465 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@aparna-patil) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_firewallpolicy + +version_added: "1.13.0" + +short_description: Create, delete or update specified firewall policy. + +description: + - Creates, deletes, or updates given firewall policy in specified resource group. + +options: + resource_group: + description: + - Name of the resource group. + required: true + type: str + name: + description: + - The name of the firewall policy. + required: true + type: str + location: + description: + - Location for firewall policy. Defaults to location of resource group if not specified. + type: str + base_policy: + description: + - The name of the parent firewall policy from which rules are inherited. + type: str + threat_intel_mode: + description: + - The operation mode for Threat Intel. + default: alert + type: str + choices: + - alert + - deny + - 'off' + threat_intel_whitelist: + description: + - ThreatIntel Whitelist for Firewall Policy. + type: dict + suboptions: + ip_addresses: + description: + - List of IP addresses for the ThreatIntel Whitelist. + type: list + elements: str + append_ip_addresses: + description: + - Flag to indicate if the ip_addresses to be appended or not. 
+ type: bool + default: true + fqdns: + description: + - List of FQDNs for the ThreatIntel Whitelist + type: list + elements: str + append_fqdns: + description: + - Flag to indicate if the fqdns to be appended or not. + type: bool + default: true + state: + description: + - Assert the state of the firewall policy. Use C(present) to create or update and C(absent) to delete. + default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@aparna-patil) +''' + +EXAMPLES = ''' +- name: Create a Firewall Policy + azure_rm_firewallpolicy: + resource_group: myAzureResourceGroup + name: myfirewallpolicy + base_policy: firewallparentpolicy + threat_intel_mode: alert + threat_intel_whitelist: + ip_addresses: + - 10.0.0.1 + - 10.0.0.2 + fqdns: + - "*.microsoft.com" + - "*.azure.com" + state: present + +- name: Update Firewall Policy + azure_rm_firewallpolicy: + resource_group: myAzureResourceGroup + name: myfirewallpolicy + base_policy: firewallparentpolicy + threat_intel_mode: deny + threat_intel_whitelist: + ip_addresses: + - 10.0.0.1 + fqdns: + - "*.microsoft.com" + state: present + tags: + key1: "value1" + +- name: Delete Firewall Policy + azure_rm_firewallpolicy: + resource_group: myAzureResourceGroup + name: myfirewallpolicy + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the Firewall Policy. + returned: always + type: complex + contains: + id: + description: + - The firewall policy ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/firewallPolicies/myfirewallpolicy" + name: + description: + - The firewall policy name. + returned: always + type: str + sample: 'myfirewallpolicy' + location: + description: + - The Azure Region where the resource lives. 
+ returned: always + type: str + sample: eastus + base_policy: + description: + - The parent firewall policy from which rules are inherited. + returned: always + type: dict + sample: { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/firewallPolicies/firewallparentpolicy" + } + child_policies: + description: + - List of references to Child Firewall Policies. + returned: always + type: list + elements: dict + sample: [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/firewallPolicies/childpolicy1" + } + ] + provisioning_state: + description: + - The provisioning state of the resource. + returned: always + type: str + sample: Succeeded + firewalls: + description: + - List of references to Azure Firewalls that this Firewall Policy is associated with. + returned: always + type: list + elements: dict + sample: [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Network/azureFirewalls/azurefirewall" + } + ] + rule_collection_groups: + description: + - List of references to FirewallPolicyRuleCollectionGroups. + returned: always + type: list + elements: dict + sample: [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/firewallPolicies/myfirewallpolicy/ + ruleCollectionGroups/DefaultNetworkRuleCollectionGroup" + } + ] + threat_intel_mode: + description: + - The operation mode for Threat Intelligence. + returned: always + type: str + sample: Alert + threat_intel_whitelist: + description: + - ThreatIntel Whitelist for Firewall Policy. + returned: always + type: dict + sample: { + "fqdns": [ + "*.microsoft.com", + "*.azure.com" + ], + "ip_addresses": [ + "10.0.0.1", + "10.0.0.2" + ] + } + tags: + description: + - Resource tags. 
+ returned: always + type: list + sample: [{"key1": "value1"}] + type: + description: + - The type of resource. + returned: always + type: str + sample: Microsoft.Network/FirewallPolicies + etag: + description: + - The etag of the firewall policy. + returned: always + type: str + sample: 7cb2538e-0e52-4435-8979-4f417e7269d1 +''' + +from ansible.module_utils.basic import _load_params +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE, \ + format_resource_id, normalize_location_name +import copy + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + +threat_intel_whitelist_spec = dict( + ip_addresses=dict(type='list', elements='str'), + append_ip_addresses=dict(type='bool', default=True), + fqdns=dict(type='list', elements='str'), + append_fqdns=dict(type='bool', default=True) +) + + +class AzureRMFirewallPolicy(AzureRMModuleBase): + + def __init__(self): + + _load_params() + # define user inputs from playbook + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + base_policy=dict(type='str'), + threat_intel_mode=dict(choices=['alert', 'deny', 'off'], default='alert', type='str'), + threat_intel_whitelist=dict(type='dict', options=threat_intel_whitelist_spec), + state=dict(choices=['present', 'absent'], default='present', type='str'), + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.base_policy = None + self.threat_intel_mode = None + self.threat_intel_whitelist = None + self.tags = None + + super(AzureRMFirewallPolicy, self).__init__(self.module_arg_spec, + supports_tags=True, + supports_check_mode=True) + + def exec_module(self, **kwargs): + for key in 
list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + results = dict() + firewall_policy_old = None + firewall_policy_new = None + update_ip_address = False + update_fqdns = False + + # retrieve resource group to make sure it exists + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + self.location = normalize_location_name(self.location) + + if self.base_policy: + base_policy = self.parse_resource_to_dict(self.base_policy) + self.base_policy = format_resource_id(val=base_policy['name'], + subscription_id=base_policy['subscription_id'], + namespace='Microsoft.Network', + types='firewallPolicies', + resource_group=base_policy['resource_group']) + + try: + self.log('Fetching Firewall policy {0}'.format(self.name)) + firewall_policy_old = self.network_client.firewall_policies.get(self.resource_group, self.name) + # serialize object into a dictionary + results = self.firewallpolicy_to_dict(firewall_policy_old) + if self.state == 'present': + changed = False + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + self.tags = results['tags'] + if self.base_policy is not None: + if ('base_policy' not in results and self.base_policy != "") or \ + ('base_policy' in results and self.base_policy != results['base_policy']['id']): + changed = True + results['base_policy'] = self.base_policy + if self.threat_intel_mode is not None and \ + self.threat_intel_mode.lower() != results['threat_intel_mode'].lower(): + changed = True + results['threat_intel_mode'] = self.threat_intel_mode + if self.threat_intel_whitelist is not None: + if 'threat_intel_whitelist' not in results: + changed = True + results['threat_intel_whitelist'] = self.threat_intel_whitelist + else: + update_ip_addresses, results['threat_intel_whitelist']['ip_addresses'] = \ + 
self.update_values(results['threat_intel_whitelist']['ip_addresses'] + if 'ip_addresses' in results['threat_intel_whitelist'] else [], + self.threat_intel_whitelist['ip_addresses'] + if self.threat_intel_whitelist['ip_addresses'] is not None else [], + self.threat_intel_whitelist['append_ip_addresses']) + update_fqdns, results['threat_intel_whitelist']['fqdns'] = \ + self.update_values(results['threat_intel_whitelist']['fqdns'] + if 'fqdns' in results['threat_intel_whitelist'] else [], + self.threat_intel_whitelist['fqdns'] + if self.threat_intel_whitelist['fqdns'] is not None else [], + self.threat_intel_whitelist['append_fqdns']) + if update_ip_addresses: + changed = True + self.threat_intel_whitelist['ip_addresses'] = results['threat_intel_whitelist']['ip_addresses'] + if update_fqdns: + changed = True + self.threat_intel_whitelist['fqdns'] = results['threat_intel_whitelist']['fqdns'] + elif self.state == 'absent': + changed = True + + except ResourceNotFoundError: + if self.state == 'present': + changed = True + else: + changed = False + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + # create or update firewall policy + firewall_policy_new = \ + self.network_models.FirewallPolicy(location=self.location, + threat_intel_mode=self.threat_intel_mode) + if self.base_policy: + firewall_policy_new.base_policy = \ + self.network_models.FirewallPolicy(id=self.base_policy) + if self.threat_intel_whitelist: + firewall_policy_new.threat_intel_whitelist = self.network_models.FirewallPolicyThreatIntelWhitelist( + ip_addresses=self.threat_intel_whitelist['ip_addresses'], + fqdns=self.threat_intel_whitelist['fqdns'] + ) + if self.tags: + firewall_policy_new.tags = self.tags + self.results['state'] = self.create_or_update_firewallpolicy(firewall_policy_new) + + elif self.state == 'absent': + # delete firewall policy + self.delete_firewallpolicy() + 
self.results['state'] = 'Deleted' + + return self.results + + def create_or_update_firewallpolicy(self, firewall_policy): + try: + # create a firewall policy + response = self.network_client.firewall_policies.begin_create_or_update(resource_group_name=self.resource_group, + firewall_policy_name=self.name, + parameters=firewall_policy) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error creating or updating Firewall policy {0} - {1}".format(self.name, str(exc))) + return self.firewallpolicy_to_dict(response) + + def delete_firewallpolicy(self): + try: + # delete a firewall policy + response = self.network_client.firewall_policies.begin_delete(resource_group_name=self.resource_group, + firewall_policy_name=self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error deleting Firewall policy {0} - {1}".format(self.name, str(exc))) + return response + + def update_values(self, existing_values, param_values, append): + # comparing input values with existing values for given parameter + + new_values = copy.copy(existing_values) + changed = False + + # check add or update + for item in param_values: + if item not in new_values: + changed = True + new_values.append(item) + # check remove + if not append: + for item in existing_values: + if item not in param_values: + new_values.remove(item) + changed = True + return changed, new_values + + def firewallpolicy_to_dict(self, firewallpolicy): + result = firewallpolicy.as_dict() + result['tags'] = firewallpolicy.tags + return result + + +def main(): + AzureRMFirewallPolicy() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy_info.py new file mode 100644 index 000000000..2c5c6c5dc --- /dev/null +++ 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_firewallpolicy_info.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@aparna-patil) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_firewallpolicy_info + +version_added: "1.13.0" + +short_description: Get firewall policy facts + +description: + - Get facts for specified firewall policy or all firewall policies in a given resource group. + +options: + resource_group: + description: + - Name of the resource group. + type: str + name: + description: + - Name of the Firewall policy. + type: str + tags: + description: + - Limit the results by providing resource tags. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Aparna Patil (@aparna-patil) + +''' + +EXAMPLES = ''' +- name: Get facts for one firewall policy + azure_rm_firewallpolicy_info: + resource_group: myAzureResourceGroup + name: myfirewallpolicy + +- name: Get facts for all firewall policies in resource group + azure_rm_firewallpolicy_info: + resource_group: myAzureResourceGroup +''' + +RETURN = ''' +firewallpolicies: + description: + - Gets a list of firewall policies. 
+ returned: always + type: list + elements: dict + sample: [ + { + "base_policy": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/ + providers/Microsoft.Network/firewallPolicies/firewallparentpolicy", + "child_policies": [], + "dns_settings": { + "enable_proxy": null, + "require_proxy_for_network_rules": null, + "servers": [] + }, + "etag": "a7b62add-9a6d-42bc-80ff-c288799e3561", + "firewalls": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/firewallPolicies/myfirewallpolicy", + "location": "eastus", + "name": "myfirewallpolicy", + "provisioning_state": "Succeeded", + "rule_collection_groups": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/ + providers/Microsoft.Network/firewallPolicies/myfirewallpolicy/ + ruleCollectionGroups/DefaultNetworkRuleCollectionGroup" + } + ], + "tags": { + "key1": "value1" + }, + "threat_intel_mode": "Deny", + "threat_intel_whitelist": { + "fqdns": [ + "*.microsoft.com" + ], + "ip_addresses": [ + "10.0.0.1" + ] + }, + "type": "Microsoft.Network/FirewallPolicies" + } + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except Exception: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'FirewallPolicy' + + +class AzureRMFirewallPolicyInfo(AzureRMModuleBase): + + def __init__(self): + + # define user inputs variables + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + # store the results of the module operation + self.results = dict( + changed=False + ) + + self.name = None + self.resource_group = None + self.tags = None + + 
super(AzureRMFirewallPolicyInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + facts_module=True, + supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + results = [] + # list the conditions and results to return based on user input + if self.name is not None: + # if firewall policy name is provided, then return facts about that specific firewall policy + results = self.get_item() + elif self.resource_group: + # all the firewall policies listed in specific resource group + results = self.list_resource_group() + else: + # all the firewall policies in a subscription + results = self.list_items() + + self.results['firewallpolicies'] = self.curated_items(results) + + return self.results + + def get_item(self): + self.log('Get properties for Firewall policy - {0}'.format(self.name)) + item = None + results = [] + # get specific Firewall policy + try: + item = self.network_client.firewall_policies.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + # serialize result + if item and self.has_tags(item.tags, self.tags): + results = [item] + return results + + def list_resource_group(self): + self.log('List all Firewall policies for resource group - {0}'.format(self.resource_group)) + try: + response = self.network_client.firewall_policies.list(self.resource_group) + except Exception as exc: + self.fail("Failed to list firewall policies for resource group {0} - {1}".format(self.resource_group, + str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def list_items(self): + self.log('List all the Firewall Policies in a subscription.') + try: + response = self.network_client.firewall_policies.list_all() + except Exception as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, 
self.tags): + results.append(item) + return results + + def curated_items(self, raws): + return [self.firewallpolicy_to_dict(item) for item in raws] if raws else [] + + def firewallpolicy_to_dict(self, firewallpolicy): + result = dict( + id=firewallpolicy.id, + name=firewallpolicy.name, + location=firewallpolicy.location, + tags=firewallpolicy.tags, + rule_collection_groups=[dict(id=x.id) for x in firewallpolicy.rule_collection_groups], + provisioning_state=firewallpolicy.provisioning_state, + base_policy=firewallpolicy.base_policy.id if firewallpolicy.base_policy is not None else None, + firewalls=[dict(id=x.id) for x in firewallpolicy.firewalls], + child_policies=[dict(id=x.id) for x in firewallpolicy.child_policies], + threat_intel_mode=firewallpolicy.threat_intel_mode, + threat_intel_whitelist=dict( + ip_addresses=firewallpolicy.threat_intel_whitelist.ip_addresses, + fqdns=firewallpolicy.threat_intel_whitelist.fqdns + ) if firewallpolicy.threat_intel_whitelist is not None else dict(), + dns_settings=dict( + enable_proxy=firewallpolicy.dns_settings.enable_proxy, + servers=firewallpolicy.dns_settings.servers, + require_proxy_for_network_rules=firewallpolicy.dns_settings.require_proxy_for_network_rules + )if firewallpolicy.dns_settings is not None else dict(), + etag=firewallpolicy.etag, + type=firewallpolicy.type + ) + return result + + +def main(): + AzureRMFirewallPolicyInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp.py new file mode 100644 index 000000000..cf0bc00b0 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp.py @@ -0,0 +1,402 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Thomas Stringer +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_functionapp +version_added: "0.1.2" +short_description: Manage Azure Function Apps +description: + - Create, update or delete an Azure Function App. +options: + resource_group: + description: + - Name of resource group. + required: true + aliases: + - resource_group_name + name: + description: + - Name of the Azure Function App. + required: true + location: + description: + - Valid Azure location. Defaults to location of the resource group. + plan: + description: + - App service plan. + - It can be name of existing app service plan in same resource group as function app. + - It can be resource id of existing app service plan. + - Resource id. For example /subscriptions//resourceGroups//providers/Microsoft.Web/serverFarms/. + - It can be a dict which contains C(name), C(resource_group). + - C(name). Name of app service plan. + - C(resource_group). Resource group name of app service plan. + container_settings: + description: Web app container settings. + suboptions: + name: + description: + - Name of container. For example "imagename:tag". + registry_server_url: + description: + - Container registry server url. For example C(mydockerregistry.io). + registry_server_user: + description: + - The container registry server user name. + registry_server_password: + description: + - The container registry server password. + storage_account: + description: + - Name of the storage account to use. + required: true + aliases: + - storage + - storage_account_name + app_settings: + description: + - Dictionary containing application settings. + state: + description: + - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' +- name: Create a function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + +- name: Create a function app with app settings + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + app_settings: + setting1: value1 + setting2: value2 + +- name: Create container based function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + storage_account: myStorageAccount + plan: + resource_group: myResourceGroup + name: myAppPlan + container_settings: + name: httpd + registry_server_url: index.docker.io + +- name: Delete a function app + azure_rm_functionapp: + resource_group: myResourceGroup + name: myFunctionApp + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the Azure Function App. 
+ returned: success + type: dict + example: + id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp + name: myfunctionapp + kind: functionapp + location: East US + type: Microsoft.Web/sites + state: Running + host_names: + - myfunctionapp.azurewebsites.net + repository_site_name: myfunctionapp + usage_state: Normal + enabled: true + enabled_host_names: + - myfunctionapp.azurewebsites.net + - myfunctionapp.scm.azurewebsites.net + availability_state: Normal + host_name_ssl_states: + - name: myfunctionapp.azurewebsites.net + ssl_state: Disabled + host_type: Standard + - name: myfunctionapp.scm.azurewebsites.net + ssl_state: Disabled + host_type: Repository + server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan + reserved: false + last_modified_time_utc: '2017-08-22T18:54:01.190Z' + scm_site_also_stopped: false + client_affinity_enabled: true + client_cert_enabled: false + host_names_disabled: false + outbound_ip_addresses: ............ 
+ container_size: 1536 + daily_memory_time_quota: 0 + resource_group: myResourceGroup + default_host_name: myfunctionapp.azurewebsites.net +''' # NOQA + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.web.models import Site, SiteConfig, NameValuePair +except ImportError: + # This is handled in azure_rm_common + pass + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + + +class AzureRMFunctionApp(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True, aliases=['resource_group_name']), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + storage_account=dict( + type='str', + aliases=['storage', 'storage_account_name'] + ), + app_settings=dict(type='dict'), + plan=dict( + type='raw' + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ) + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.storage_account = None + self.app_settings = None + self.plan = None + self.container_settings = None + + required_if = [('state', 'present', ['storage_account'])] + + super(AzureRMFunctionApp, self).__init__( + self.module_arg_spec, + supports_check_mode=True, + required_if=required_if + ) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + if self.app_settings is None: + self.app_settings = dict() + + try: + resource_group = self.rm_client.resource_groups.get(self.resource_group) + except Exception: + self.fail('Unable to 
retrieve resource group') + + self.location = self.location or resource_group.location + + try: + function_app = self.web_client.web_apps.get(resource_group_name=self.resource_group, name=self.name) + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising error + exists = function_app is not None + except ResourceNotFoundError as exc: + exists = False + + if self.state == 'absent': + if exists: + if self.check_mode: + self.results['changed'] = True + return self.results + try: + self.web_client.web_apps.delete(resource_group_name=self.resource_group, name=self.name) + self.results['changed'] = True + except Exception as exc: + self.fail('Failure while deleting web app: {0}'.format(exc)) + else: + self.results['changed'] = False + else: + kind = 'functionapp' + linux_fx_version = None + if self.container_settings and self.container_settings.get('name'): + kind = 'functionapp,linux,container' + linux_fx_version = 'DOCKER|' + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + linux_fx_version += self.container_settings['registry_server_url'] + '/' + linux_fx_version += self.container_settings['name'] + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings.get('registry_server_user') + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password') + + if not self.plan and exists: + self.plan = function_app.server_farm_id + + if not exists: + function_app = Site( + location=self.location, + kind=kind, + site_config=SiteConfig( + app_settings=self.aggregated_app_settings(), + scm_type='LocalGit' + ) + ) + self.results['changed'] = True + else: + self.results['changed'], function_app = self.update(function_app) + + # get app 
service plan + if self.plan: + if isinstance(self.plan, dict): + self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format( + self.subscription_id, + self.plan.get('resource_group', self.resource_group), + self.plan.get('name') + ) + function_app.server_farm_id = self.plan + + # set linux fx version + if linux_fx_version: + function_app.site_config.linux_fx_version = linux_fx_version + + if self.check_mode: + self.results['state'] = function_app.as_dict() + elif self.results['changed']: + try: + response = self.web_client.web_apps.begin_create_or_update(resource_group_name=self.resource_group, + name=self.name, + site_envelope=function_app) + new_function_app = self.get_poller_result(response) + self.results['state'] = new_function_app.as_dict() + except Exception as exc: + self.fail('Error creating or updating web app: {0}'.format(exc)) + + return self.results + + def update(self, source_function_app): + """Update the Site object if there are any changes""" + + source_app_settings = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, + name=self.name + ) + + changed, target_app_settings = self.update_app_settings(source_app_settings.properties) + + source_function_app.site_config = SiteConfig( + app_settings=target_app_settings, + scm_type='LocalGit' + ) + + return changed, source_function_app + + def update_app_settings(self, source_app_settings): + """Update app settings""" + + target_app_settings = self.aggregated_app_settings() + target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings]) + return target_app_settings_dict != source_app_settings, target_app_settings + + def necessary_functionapp_settings(self): + """Construct the necessary app settings required for an Azure Function App""" + + function_app_settings = [] + + if self.container_settings is None: + for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 
'AzureWebJobsDashboard']: + function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string)) + function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1')) + function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0')) + function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name)) + else: + function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2')) + function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False)) + function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string)) + + return function_app_settings + + def aggregated_app_settings(self): + """Combine both system and user app settings""" + + function_app_settings = self.necessary_functionapp_settings() + for app_setting_key in self.app_settings: + found_setting = None + for s in function_app_settings: + if s.name == app_setting_key: + found_setting = s + break + if found_setting: + found_setting.value = self.app_settings[app_setting_key] + else: + function_app_settings.append(NameValuePair( + name=app_setting_key, + value=self.app_settings[app_setting_key] + )) + return function_app_settings + + @property + def storage_connection_string(self): + """Construct the storage account connection string""" + + return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format( + self.storage_account, + self.storage_key + ) + + @property + def storage_key(self): + """Retrieve the storage account key""" + + return self.storage_client.storage_accounts.list_keys( + resource_group_name=self.resource_group, + account_name=self.storage_account + ).keys[0].value + + +def main(): + """Main function execution""" + + AzureRMFunctionApp() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp_info.py new file mode 100644 index 000000000..3b4904e35 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_functionapp_info.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Thomas Stringer, + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_functionapp_info +version_added: "0.1.2" +short_description: Get Azure Function App facts +description: + - Get facts for one Azure Function App or all Function Apps within a resource group. +options: + name: + description: + - Only show results for a specific Function App. + resource_group: + description: + - Limit results to a resource group. Required when filtering by name. + aliases: + - resource_group_name + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' + - name: Get facts for one Function App + azure_rm_functionapp_info: + resource_group: myResourceGroup + name: myfunctionapp + + - name: Get facts for all Function Apps in a resource group + azure_rm_functionapp_info: + resource_group: myResourceGroup + + - name: Get facts for all Function Apps by tags + azure_rm_functionapp_info: + tags: + - testing +''' + +RETURN = ''' +azure_functionapps: + description: + - List of Azure Function Apps dicts. 
+ returned: always + type: list + example: + id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp + name: myfunctionapp + kind: functionapp + location: East US + type: Microsoft.Web/sites + state: Running + host_names: + - myfunctionapp.azurewebsites.net + repository_site_name: myfunctionapp + usage_state: Normal + enabled: true + enabled_host_names: + - myfunctionapp.azurewebsites.net + - myfunctionapp.scm.azurewebsites.net + availability_state: Normal + host_name_ssl_states: + - name: myfunctionapp.azurewebsites.net + ssl_state: Disabled + host_type: Standard + - name: myfunctionapp.scm.azurewebsites.net + ssl_state: Disabled + host_type: Repository + server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan + reserved: false + last_modified_time_utc: '2017-08-22T18:54:01.190Z' + scm_site_also_stopped: false + client_affinity_enabled: true + client_cert_enabled: false + host_names_disabled: false + outbound_ip_addresses: ............ 
+ container_size: 1536 + daily_memory_time_quota: 0 + resource_group: myResourceGroup + default_host_name: myfunctionapp.azurewebsites.net +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMFunctionAppInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str', aliases=['resource_group_name']), + tags=dict(type='list', elements='str'), + ) + + self.results = dict( + changed=False, + ansible_info=dict(azure_functionapps=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMFunctionAppInfo, self).__init__( + self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_functionapp_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['ansible_info']['azure_functionapps'] = self.get_functionapp() + elif self.resource_group: + self.results['ansible_info']['azure_functionapps'] = self.list_resource_group() + else: + self.results['ansible_info']['azure_functionapps'] = self.list_all() + + return self.results + + def get_functionapp(self): + self.log('Get properties for Function App {0}'.format(self.name)) + function_app = None + result = [] + + try: + function_app = self.web_client.web_apps.get(resource_group_name=self.resource_group, name=self.name) + except ResourceNotFoundError: + pass + + if 
function_app and self.has_tags(function_app.tags, self.tags): + result = function_app.as_dict() + + return [result] + + def list_resource_group(self): + self.log('List items') + try: + response = self.web_client.web_apps.list_by_resource_group(resource_group_name=self.resource_group) + except Exception as exc: + self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item.as_dict()) + return results + + def list_all(self): + self.log('List all items') + try: + response = self.web_client.web_apps.list_by_resource_group(resource_group_name=self.resource_group) + except Exception as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item.as_dict()) + return results + + +def main(): + AzureRMFunctionAppInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery.py new file mode 100644 index 000000000..3b5e14408 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_gallery +version_added: "0.1.2" +short_description: Manage Azure Shared Image Gallery instance +description: + - Create, update and delete instance of Azure Shared Image Gallery (SIG). +options: + resource_group: + description: + - The name of the resource group. 
+ required: true + type: str + name: + description: + - The name of the Shared Image Gallery. + - Valid names consist of less than 80 alphanumeric characters, underscores and periods. + required: true + type: str + location: + description: + - Resource location. + type: str + description: + description: + - The description of this Shared Image Gallery resource. This property is updatable. + type: str + state: + description: + - Assert the state of the Gallery. + - Use C(present) to create or update an Gallery and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create or update a simple gallery. + azure_rm_gallery: + resource_group: myResourceGroup + name: myGallery1283 + location: West US + description: This is the gallery description. +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283" +''' + +import time +import json +import re +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from copy import deepcopy +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMGalleries(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + updatable=False, + disposition='resourceGroupName', + required=True + ), + name=dict( + type='str', + updatable=False, + disposition='galleryName', + required=True + ), + location=dict( + type='str', + updatable=False, + disposition='/' + ), + description=dict( + type='str', + disposition='/properties/*' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.gallery = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200, 201, 202] + self.to_do = Actions.NoAction + + self.body = {} + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-07-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(AzureRMGalleries, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = 
kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + if 'location' not in self.body: + self.body['location'] = resource_group.location + + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.name) + + old_response = self.get_resource() + + if not old_response: + self.log("Gallery instance doesn't exist") + + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log('Gallery instance already exists') + + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + self.body['properties'].pop('identifier', None) + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log('Need to Create / Update the Gallery instance') + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_resource() + + # if not old_response: + self.results['changed'] = True + # else: + # self.results['changed'] = old_response.__ne__(response) + self.log('Creation / Update done') + elif self.to_do == Actions.Delete: + self.log('Gallery instance deleted') + 
self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_resource() + + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_resource(): + time.sleep(20) + else: + self.log('Gallery instance unchanged') + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_resource(self): + # self.log('Creating / Updating the Gallery instance {0}'.format(self.)) + + try: + response = self.mgmt_client.query(self.url, + 'PUT', + self.query_parameters, + self.header_parameters, + self.body, + self.status_code, + 600, + 30) + except CloudError as exc: + self.log('Error attempting to create the Gallery instance.') + self.fail('Error creating the Gallery instance: {0}'.format(str(exc))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + def delete_resource(self): + # self.log('Deleting the Gallery instance {0}'.format(self.)) + try: + response = self.mgmt_client.query(self.url, + 'DELETE', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + except CloudError as e: + self.log('Error attempting to delete the Gallery instance.') + self.fail('Error deleting the Gallery instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + # self.log('Checking if the Gallery instance {0} is present'.format(self.)) + found = False + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + response = json.loads(response.text) + found = True + self.log("Response : {0}".format(response)) + # self.log("AzureFirewall instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not 
find the AzureFirewall instance.') + if found is True: + return response + + return False + + +def main(): + AzureRMGalleries() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery_info.py new file mode 100644 index 000000000..58cb29dd5 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_gallery_info.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Liu Qingyi, (@smile37773) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_gallery_info +version_added: "0.1.2" +short_description: Get Azure Shared Image Gallery info +description: + - Get info of Azure Shared Image Gallery. +options: + resource_group: + description: + - The name of the resource group. + type: str + name: + description: + - Resource name + type: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Liu Qingyi (@smile37773) + +''' + +EXAMPLES = ''' +- name: List galleries in a subscription. + azure_rm_gallery_info: +- name: List galleries in a resource group. + azure_rm_gallery_info: + resource_group: myResourceGroup +- name: Get a gallery. + azure_rm_gallery_info: + resource_group: myResourceGroup + name: myGallery + +''' + +RETURN = ''' +galleries: + description: + - A list of dict results where the key is the name of the gallery and the values are the info for that gallery. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery" + name: + description: + - Resource name. 
+ returned: always + type: str + sample: "myGallery" + location: + description: + - Resource location. + returned: always + type: str + sample: "eastus" + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "tag": "value" } + description: + description: + - This is the gallery description. + returned: always + type: str + sample: "This is the gallery description." + provisioning_state: + description: + - The current state of the gallery. + returned: always + type: str + sample: "Succeeded" + +''' + +import time +import json +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from copy import deepcopy +try: + from msrestazure.azure_exceptions import CloudError +except Exception: + # handled in azure_rm_common + pass + + +class AzureRMGalleriesInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ) + ) + + self.resource_group = None + self.name = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-03-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + self.mgmt_client = None + super(AzureRMGalleriesInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if (self.resource_group is not None and self.name is not None): + # self.results['galleries'] = self.format_item(self.get()) + 
self.results['galleries'] = self.get() + elif (self.resource_group is not None): + # self.results['galleries'] = self.format_item(self.listbyresourcegroup()) + self.results['galleries'] = self.listbyresourcegroup() + else: + # self.results['galleries'] = [self.format_item(self.list())] + self.results['galleries'] = self.list() + return self.results + + def get(self): + response = None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.name) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return self.format_item(results) + + def listbyresourcegroup(self): + response = None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return [self.format_item(x) for x in 
results['value']] if results['value'] else [] + + def list(self): + response = None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return [self.format_item(x) for x in results['value']] if results['value'] else [] + + def format_item(self, item): + d = { + 'id': item['id'], + 'name': item['name'], + 'location': item['location'], + 'tags': item.get('tags'), + 'description': item['properties']['description'], + 'provisioning_state': item['properties']['provisioningState'] + } + return d + + +def main(): + AzureRMGalleriesInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage.py new file mode 100644 index 000000000..a0335fa01 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage.py @@ -0,0 +1,552 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_galleryimage +version_added: "0.1.2" +short_description: Manage Azure SIG Image instance +description: + - Create, update and delete instance of Azure SIG Image. +options: + resource_group: + description: + - The name of the resource group. 
+ required: true + type: str + gallery_name: + description: + - The name of the Shared Image Gallery in which the Image Definition is to be created. + required: true + type: str + name: + description: + - The name of the gallery Image Definition to be created or updated. + - The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. + - The maximum length is 80 characters. + required: true + type: str + location: + description: + - Resource location. + type: str + description: + description: + - The description of this gallery Image Definition resource. This property is updatable. + type: str + eula: + description: + - The Eula agreement for the gallery Image Definition. + type: str + privacy_statement_uri: + description: + - The privacy statement uri. + type: str + release_note_uri: + description: + - The release note uri. + type: str + os_type: + description: + - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. + choices: + - windows + - linux + required: true + type: str + os_state: + description: + - The allowed values for OS State are C(generalized). + choices: + - generalized + - specialized + required: true + type: str + hypervgeneration: + description: + - This property allows you to specify the Hyper V Version of the Virtual Machines. + choices: + - V1 + - V2 + type: str + end_of_life_date: + description: + - The end of life date of the gallery Image Definition. + - This property can be used for decommissioning purposes. + - This property is updatable. + - Format should be according to ISO-8601, for instance "2019-06-26". + type: str + identifier: + description: + - Image identifier. + required: true + type: dict + suboptions: + publisher: + description: + - The name of the gallery Image Definition publisher. + required: true + type: str + offer: + description: + - The name of the gallery Image Definition offer. 
+ required: true + type: str + sku: + description: + - The name of the gallery Image Definition SKU. + required: true + type: str + recommended: + description: + - Recommended parameter values. + type: dict + suboptions: + v_cpus: + description: + - Number of virtual CPUs. + type: dict + suboptions: + min: + description: + - The minimum number of the resource. + type: int + max: + description: + - The maximum number of the resource. + type: int + memory: + description: + - Memory. + type: dict + suboptions: + min: + description: + - The minimum number of the resource. + type: int + max: + description: + - The maximum number of the resource. + type: int + disallowed: + description: + - Disallowed parameter values. + type: dict + suboptions: + disk_types: + description: + - A list of disallowed disk types. + type: list + purchase_plan: + description: + - Purchase plan. + type: dict + suboptions: + name: + description: + - The plan ID. + type: str + publisher: + description: + - The publisher ID. + type: str + product: + description: + - The product ID. + type: str + state: + description: + - Assert the state of the GalleryImage. + - Use C(present) to create or update an GalleryImage and C(absent) to delete it. + default: present + choices: + - absent + - present + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create or update gallery image + azure_rm_galleryimage: + resource_group: myResourceGroup + gallery_name: myGallery1283 + name: myImage + location: West US + os_type: linux + os_state: generalized + identifier: + publisher: myPublisherName + offer: myOfferName + sku: mySkuName +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGalle + ry1283/images/myImage" +''' + +import time +import json +import re +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from copy import deepcopy +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMGalleryImages(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + updatable=False, + disposition='resourceGroupName', + required=True + ), + gallery_name=dict( + type='str', + updatable=False, + disposition='galleryName', + required=True + ), + name=dict( + type='str', + updatable=False, + disposition='galleryImageName', + required=True + ), + location=dict( + type='str', + updatable=False, + disposition='/' + ), + description=dict( + type='str', + disposition='/properties/*' + ), + eula=dict( + type='str', + disposition='/properties/*' + ), + privacy_statement_uri=dict( + type='str', + disposition='/properties/privacyStatementUri' + ), + release_note_uri=dict( + type='str', + disposition='/properties/releaseNoteUri' + ), + os_type=dict( + type='str', + disposition='/properties/osType', + choices=['windows', + 'linux'] + ), + os_state=dict( + type='str', + disposition='/properties/osState', + choices=['generalized', + 'specialized'] + ), + hypervgeneration=dict( + type='str', + disposition='/properties/hyperVGeneration', + choices=['V1', + 'V2'] + ), + end_of_life_date=dict( + type='str', + disposition='/properties/endOfLifeDate' + ), + identifier=dict( + type='dict', + disposition='/properties/*', + 
options=dict( + publisher=dict( + type='str', + required=True, + updatable=False + ), + offer=dict( + type='str', + required=True + ), + sku=dict( + type='str', + required=True + ) + ) + ), + recommended=dict( + type='dict', + disposition='/properties/*', + options=dict( + v_cpus=dict( + type='dict', + disposition='vCPUs', + options=dict( + min=dict( + type='int' + ), + max=dict( + type='int' + ) + ) + ), + memory=dict( + type='dict', + options=dict( + min=dict( + type='int' + ), + max=dict( + type='int' + ) + ) + ) + ) + ), + disallowed=dict( + type='dict', + disposition='/properties/*', + options=dict( + disk_types=dict( + type='list', + disposition='diskTypes' + ) + ) + ), + purchase_plan=dict( + type='dict', + disposition='/properties/purchasePlan', + options=dict( + name=dict( + type='str' + ), + publisher=dict( + type='str' + ), + product=dict( + type='str' + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.gallery_name = None + self.name = None + self.gallery_image = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200, 201, 202] + self.to_do = Actions.NoAction + + self.body = {} + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-07-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(AzureRMGalleryImages, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + 
base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + if 'location' not in self.body: + self.body['location'] = resource_group.location + + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}' + + '/images' + + '/{{ image_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) + self.url = self.url.replace('{{ image_name }}', self.name) + + old_response = self.get_resource() + + if not old_response: + self.log("GalleryImage instance doesn't exist") + + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log('GalleryImage instance already exists') + + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log('Need to Create / Update the GalleryImage instance') + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_resource() + + # if not old_response: + self.results['changed'] = True + # else: + # self.results['changed'] = old_response.__ne__(response) + self.log('Creation / Update done') + elif self.to_do == Actions.Delete: + self.log('GalleryImage instance deleted') + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_resource() + + # make sure instance is 
actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_resource(): + time.sleep(20) + else: + self.log('GalleryImage instance unchanged') + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_resource(self): + # self.log('Creating / Updating the GalleryImage instance {0}'.format(self.)) + + try: + response = self.mgmt_client.query(self.url, + 'PUT', + self.query_parameters, + self.header_parameters, + self.body, + self.status_code, + 600, + 30) + except CloudError as exc: + self.log('Error attempting to create the GalleryImage instance.') + self.fail('Error creating the GalleryImage instance: {0}'.format(str(exc))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + def delete_resource(self): + # self.log('Deleting the GalleryImage instance {0}'.format(self.)) + try: + response = self.mgmt_client.query(self.url, + 'DELETE', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + except CloudError as e: + self.log('Error attempting to delete the GalleryImage instance.') + self.fail('Error deleting the GalleryImage instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + # self.log('Checking if the GalleryImage instance {0} is present'.format(self.)) + found = False + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + response = json.loads(response.text) + found = True + self.log("Response : {0}".format(response)) + # self.log("AzureFirewall instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the AzureFirewall instance.') + if found is True: + return response + + return False + + 
+def main(): + AzureRMGalleryImages() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage_info.py new file mode 100644 index 000000000..90bcbe244 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimage_info.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Liu Qingyi, (@smile37773) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_galleryimage_info +version_added: "0.1.2" +short_description: Get Azure SIG Image info +description: + - Get info of Azure SIG Image. +options: + resource_group: + description: + - The name of the resource group. + type: str + required: true + gallery_name: + description: + - The name of the shared image gallery from which the image definitions are to be retrieved. + type: str + required: true + name: + description: + - Resource name. + type: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Liu Qingyi (@smile37773) + +''' + +EXAMPLES = ''' +- name: List gallery images in a gallery. + azure_rm_galleryimage_info: + resource_group: myResourceGroup + gallery_name: myGallery +- name: Get a gallery image. + azure_rm_galleryimage_info: + resource_group: myResourceGroup + gallery_name: myGallery + name: myImage + +''' + +RETURN = ''' +images: + description: + - A list of dict results where the key is the name of the image and the values are the info for that image. + returned: always + type: complex + contains: + id: + description: + - Resource ID. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup + /providers/Microsoft.Compute/galleries/myGallery/images/myImage" + name: + description: + - Resource name. + returned: always + type: str + sample: myImage + location: + description: + - Resource location. + returned: always + type: str + sample: "eastus" + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "tag": "value" } + os_state: + description: + - The allowed values for OS State are C(generalized). + type: OperatingSystemStateTypes + sample: "Generalized" + os_type: + description: + - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image. + type: OperatingSystemTypes + sample: "linux/windows" + identifier: + description: + - This is the gallery image definition identifier. + type: dict + contains: + offer: + description: + - The name of the gallery image definition offer. + type: str + sample: "myOfferName" + publisher: + description: + - The name of the gallery image definition publisher. + type: str + sample: "myPublisherName" + sku: + description: + - The name of the gallery image definition sku. 
+ type: str + sample: "mySkuName" + +''' + +import time +import json +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from copy import deepcopy +try: + from msrestazure.azure_exceptions import CloudError +except Exception: + # handled in azure_rm_common + pass + + +class AzureRMGalleryImagesInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + gallery_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + + self.resource_group = None + self.gallery_name = None + self.name = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-03-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + self.mgmt_client = None + super(AzureRMGalleryImagesInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if (self.resource_group is not None and + self.gallery_name is not None and + self.name is not None): + # self.results['gallery_images'] = self.format_item(self.get()) + self.results['images'] = self.get() + elif (self.resource_group is not None and + self.gallery_name is not None): + # self.results['gallery_images'] = self.format_item(self.listbygallery()) + self.results['images'] = self.listbygallery() + return self.results + + def get(self): + response = None + results = {} + # prepare url + self.url = 
('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}' + + '/images' + + '/{{ image_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) + self.url = self.url.replace('{{ image_name }}', self.name) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return self.format_item(results) + + def listbygallery(self): + response = None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}' + + '/images') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return [self.format_item(x) for x in results['value']] if results['value'] else [] + + def format_item(self, item): + d = { + 'id': item['id'], + 'name': item['name'], + 'location': item['location'], + 'tags': item.get('tags'), + 'os_state': 
item['properties']['osState'], + 'os_type': item['properties']['osType'], + 'identifier': item['properties']['identifier'] + } + return d + + +def main(): + AzureRMGalleryImagesInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion.py new file mode 100644 index 000000000..c539e3d4a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion.py @@ -0,0 +1,637 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_galleryimageversion +version_added: "0.1.2" +short_description: Manage Azure SIG Image Version instance +description: + - Create, update and delete instance of Azure SIG Image Version. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + gallery_name: + description: + - The name of the Shared Image Gallery in which the Image Definition resides. + required: true + type: str + gallery_image_name: + description: + - The name of the gallery Image Definition in which the Image Version is to be created. + required: true + type: str + name: + description: + - The name of the gallery Image Version to be created. + - Needs to follow semantic version name pattern, The allowed characters are digit and period. + - Digits must be within the range of a 32-bit integer. For example ... + required: true + type: str + location: + description: + - Resource location. 
+ type: str + storage_profile: + description: + - Storage profile + required: true + type: dict + suboptions: + source_image: + description: + - Reference to managed image or gallery image version + - Could be resource ID to managed image, or dictionary containing I(resource_group) and I(name) + - Could be resource ID to image version, or dictionary containing I(resource_group),I(gallery_name), I(gallery_image_name) and I(version) + - Mutual exclusive with os_disk and data_disks + type: raw + os_disk: + description: + - os disk snapshot + - Mutual exclusive with source_image + type: raw + suboptions: + source: + description: + - Reference to os disk snapshot. Could be resource ID or dictionary containing I(resource_group) and I(name) + type: str + host_caching: + description: + - host disk caching + type: str + default: None + choices: + - None + - ReadOnly + - ReadWrite + data_disks: + description: + - list of data disk snapshot + - Mutual exclusive with source_image + type: list + suboptions: + source: + description: + - Reference to data disk snapshot. Could be resource ID or dictionary containing I(resource_group) and I(name) + type: str + lun: + description: + - lun of the data disk + type: int + host_caching: + description: + - host disk caching + type: str + default: None + choices: + - None + - ReadOnly + - ReadWrite + publishing_profile: + description: + - Publishing profile. + required: true + type: dict + suboptions: + target_regions: + description: + - The target regions where the Image Version is going to be replicated to. + - This property is updatable. + type: list + suboptions: + name: + description: + - Region name. + type: str + regional_replica_count: + description: + - The number of replicas of the Image Version to be created per region. + - This property would take effect for a region when regionalReplicaCount is not specified. + - This property is updatable. + type: str + storage_account_type: + description: + - Storage account type. 
+ type: str + managed_image: + description: + - Managed image reference, could be resource ID, or dictionary containing I(resource_group) and I(name) + - Obsolete since 2.10, use storage_profile instead + snapshot: + description: + - Source snapshot to be used. + - Obsolete since 2.10, use storage_profile instead + replica_count: + description: + - The number of replicas of the Image Version to be created per region. + - This property would take effect for a region when regionalReplicaCount is not specified. + - This property is updatable. + type: int + exclude_from_latest: + description: + If I(exclude_from_latest=true), Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + type: bool + end_of_life_date: + description: + - The end of life date of the gallery Image Version. + - This property can be used for decommissioning purposes. + - This property is updatable. Format should be according to ISO-8601, for instance "2019-06-26". + type: str + storage_account_type: + description: + - Specifies the storage account type to be used to store the image. + - This property is not updatable. + type: str + state: + description: + - Assert the state of the GalleryImageVersion. + - Use C(present) to create or update a GalleryImageVersion and C(absent) to delete it. 
+ default: present + choices: + - absent + - present + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create a gallery image version form a managed image + azure_rm_galleryimageversion: + resource_group: myResourceGroup + gallery_name: myGallery + gallery_image_name: myGalleryImage + name: 1.1.0 + location: East US + publishing_profile: + end_of_life_date: "2020-10-01t00:00:00+00:00" + exclude_from_latest: yes + replica_count: 4 + storage_account_type: Standard_LRS + target_regions: + - name: West US + regional_replica_count: 1 + - name: East US + regional_replica_count: 3 + storage_account_type: Standard_LRS + storage_profile: + source_image: /subscriptions/sub123/resourceGroups/group123/providers/Microsoft.Compute/images/myOsImage + +- name: Create a gallery image version from another gallery image version + azure_rm_galleryimageversion: + resource_group: myResourceGroup + gallery_name: myGallery + gallery_image_name: myGalleryImage + name: 1.2.0 + location: East US + publishing_profile: + end_of_life_date: "2020-10-01t00:00:00+00:00" + exclude_from_latest: yes + replica_count: 4 + storage_account_type: Standard_LRS + target_regions: + - name: West US + regional_replica_count: 1 + - name: East US + regional_replica_count: 3 + storage_account_type: Standard_LRS + storage_profile: + source_image: + version: 1.1.0 + gallery_name: myGallery2 + gallery_image_name: myGalleryImage2 + +- name: Create gallery image by using one os dist snapshot and zero or many data disk snapshots + azure_rm_galleryimageversion: + resource_group: myRsourceGroup + gallery_name: myGallery + gallery_image_name: myGalleryImage + name: 3.4.0 + location: East US + publishing_profile: + end_of_life_date: "2020-10-01t00:00:00+00:00" + exclude_from_latest: yes + replica_count: 1 + storage_account_type: Standard_LRS + target_regions: + - name: East US + 
regional_replica_count: 1 + storage_account_type: Standard_LRS + storage_profile: + os_disk: + source: "/subscriptions/mySub/resourceGroups/myGroup/providers/Microsoft.Compute/snapshots/os_snapshot_vma" + data_disks: + - lun: 0 + source: + name: data_snapshot_vma + - lun: 1 + source: "/subscriptions/mySub/resourceGroups/myGroup/providers/Microsoft.Compute/snapshots/data_snapshot_vmb" +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGalle + ry1283/images/myImage/versions/10.1.3" +''' + +import time +import json +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMGalleryImageVersions(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + updatable=False, + disposition='resourceGroupName', + required=True + ), + gallery_name=dict( + type='str', + updatable=False, + disposition='galleryName', + required=True + ), + gallery_image_name=dict( + type='str', + updatable=False, + disposition='galleryImageName', + required=True + ), + name=dict( + type='str', + updatable=False, + disposition='galleryImageVersionName', + required=True + ), + tags=dict( + type='dict', + updatable=False, + disposition='tags', + comparison='tags' + ), + location=dict( + type='str', + updatable=False, + disposition='/', + comparison='location' + ), + storage_profile=dict( + type='dict', + updatable=False, + disposition='/properties/storageProfile', + comparison='ignore', + options=dict( + 
source_image=dict( + type='raw', + disposition='source/id', + purgeIfNone=True, + pattern=[('/subscriptions/{subscription_id}/resourceGroups' + '/{resource_group}/providers/Microsoft.Compute' + '/images/{name}'), + ('/subscriptions/{subscription_id}/resourceGroups' + '/{resource_group}/providers/Microsoft.Compute' + '/galleries/{gallery_name}/images/{gallery_image_name}' + '/versions/{version}')] + ), + os_disk=dict( + type='dict', + disposition='osDiskImage', + purgeIfNone=True, + comparison='ignore', + options=dict( + source=dict( + type='raw', + disposition='source/id', + pattern=('/subscriptions/{subscription_id}/resourceGroups' + '/{resource_group}/providers/Microsoft.Compute' + '/snapshots/{name}') + ), + host_caching=dict( + type='str', + disposition='hostCaching', + default="None", + choices=["ReadOnly", "ReadWrite", "None"] + ) + ) + ), + data_disks=dict( + type='list', + disposition='dataDiskImages', + purgeIfNone=True, + options=dict( + lun=dict( + type='int' + ), + source=dict( + type='raw', + disposition="source/id", + pattern=('/subscriptions/{subscription_id}/resourceGroups' + '/{resource_group}/providers/Microsoft.Compute' + '/snapshots/{name}') + ), + host_caching=dict( + type='str', + disposition='hostCaching', + default="None", + choices=["ReadOnly", "ReadWrite", "None"] + ) + ) + ) + ) + ), + publishing_profile=dict( + type='dict', + disposition='/properties/publishingProfile', + options=dict( + target_regions=dict( + type='list', + disposition='targetRegions', + options=dict( + name=dict( + type='str', + required=True, + comparison='location' + ), + regional_replica_count=dict( + type='int', + disposition='regionalReplicaCount' + ), + storage_account_type=dict( + type='str', + disposition='storageAccountType' + ) + ) + ), + managed_image=dict( + type='raw', + pattern=('/subscriptions/{subscription_id}/resourceGroups' + '/{resource_group}/providers/Microsoft.Compute' + '/images/{name}'), + comparison='ignore' + ), + snapshot=dict( + type='raw', 
+ pattern=('/subscriptions/{subscription_id}/resourceGroups' + '/{resource_group}/providers/Microsoft.Compute' + '/snapshots/{name}'), + comparison='ignore' + ), + replica_count=dict( + type='int', + disposition='replicaCount' + ), + exclude_from_latest=dict( + type='bool', + disposition='excludeFromLatest' + ), + end_of_life_date=dict( + type='str', + disposition='endOfLifeDate' + ), + storage_account_type=dict( + type='str', + disposition='storageAccountType', + choices=['Standard_LRS', + 'Standard_ZRS'] + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.gallery_name = None + self.gallery_image_name = None + self.name = None + self.gallery_image_version = None + self.tags = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200, 201, 202] + self.to_do = Actions.NoAction + + self.body = {} + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-07-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(AzureRMGalleryImageVersions, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + if key == 'tags': + self.body[key] = kwargs[key] + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + # keep backward compatibility + snapshot = self.body.get('properties', {}).get('publishingProfile', {}).pop('snapshot', None) + if snapshot is not None: + self.body['properties'].setdefault('storageProfile', {}).setdefault('osDiskImage', {}).setdefault('source', {})['id'] = snapshot + managed_image = self.body.get('properties', {}).get('publishingProfile', 
{}).pop('managed_image', None) + if managed_image: + self.body['properties'].setdefault('storageProfile', {}).setdefault('source', {})['id'] = managed_image + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + if 'location' not in self.body: + self.body['location'] = resource_group.location + + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}' + + '/images' + + '/{{ image_name }}' + + '/versions' + + '/{{ version_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) + self.url = self.url.replace('{{ image_name }}', self.gallery_image_name) + self.url = self.url.replace('{{ version_name }}', self.name) + + old_response = self.get_resource() + + if not old_response: + self.log("GalleryImageVersion instance doesn't exist") + + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log('GalleryImageVersion instance already exists') + + if self.state == 'absent': + self.to_do = Actions.Delete + else: + update_tags, newtags = self.update_tags(old_response.get('tags', dict())) + if update_tags: + self.tags = newtags + self.body['tags'] = self.tags + self.to_do = Actions.Update + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == 
Actions.Update): + self.log('Need to Create / Update the GalleryImageVersion instance') + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_resource() + + self.results['changed'] = True + self.log('Creation / Update done') + elif self.to_do == Actions.Delete: + self.log('GalleryImageVersion instance deleted') + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_resource() + else: + self.log('GalleryImageVersion instance unchanged') + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_resource(self): + # self.log('Creating / Updating the GalleryImageVersion instance {0}'.format(self.)) + + try: + response = self.mgmt_client.query(self.url, + 'PUT', + self.query_parameters, + self.header_parameters, + self.body, + self.status_code, + 600, + 30) + except CloudError as exc: + self.log('Error attempting to create the GalleryImageVersion instance.') + self.fail('Error creating the GalleryImageVersion instance: {0}'.format(str(exc))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + while response['properties']['provisioningState'] == 'Creating': + time.sleep(60) + response = self.get_resource() + + return response + + def delete_resource(self): + # self.log('Deleting the GalleryImageVersion instance {0}'.format(self.)) + try: + response = self.mgmt_client.query(self.url, + 'DELETE', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + except CloudError as e: + self.log('Error attempting to delete the GalleryImageVersion instance.') + self.fail('Error deleting the GalleryImageVersion instance: {0}'.format(str(e))) + return True + + def get_resource(self): + # self.log('Checking if the GalleryImageVersion instance {0} is present'.format(self.)) + found = False + try: 
+ response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + response = json.loads(response.text) + found = True + self.log("Response : {0}".format(response)) + # self.log("AzureFirewall instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the AzureFirewall instance.') + if found is True: + return response + + return False + + +def main(): + AzureRMGalleryImageVersions() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion_info.py new file mode 100644 index 000000000..1d448ca12 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_galleryimageversion_info.py @@ -0,0 +1,284 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Liu Qingyi, (@smile37773) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_galleryimageversion_info +version_added: "0.1.2" +short_description: Get Azure SIG Image Version info +description: + - Get info of Azure SIG Image Version. +options: + resource_group: + description: + - The name of the resource group. + type: str + required: true + gallery_name: + description: + - The name of the Shared Image Gallery in which the Image Definition resides. + type: str + required: true + gallery_image_name: + description: + - The name of the gallery Image Definition in which the Image Version resides. + type: str + required: true + name: + description: + - Resource name. 
+ type: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Liu Qingyi (@smile37773) + +''' + +EXAMPLES = ''' +- name: List gallery image versions in a gallery image definition. + azure_rm_galleryimageversion_info: + resource_group: myResourceGroup + gallery_name: myGallery + gallery_image_name: myImage +- name: Get a gallery image version. + azure_rm_galleryimageversion_info: + resource_group: myResourceGroup + gallery_name: myGallery + gallery_image_name: myImage + name: myVersion + +''' + +RETURN = ''' +versions: + description: + A list of dict results where the key is the name of the version and the values are the info for that version. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups + /myResourceGroup/providers/Microsoft.Compute/galleries/myGallery/images/myImage/versions/myVersion" + name: + description: + - Resource name. + returned: always + type: str + sample: "myVersion" + location: + description: + - Resource location. + returned: always + type: str + sample: "eastus" + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "tag": "value" } + publishing_profile: + description: + - The publishing profile of a gallery image version. + type: dict + provisioning_state: + description: + - The current state of the gallery. 
+ type: str + sample: "Succeeded" + +''' + +import time +import json +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from copy import deepcopy +try: + from msrestazure.azure_exceptions import CloudError +except Exception: + # handled in azure_rm_common + pass + + +class AzureRMGalleryImageVersionsInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + gallery_name=dict( + type='str', + required=True + ), + gallery_image_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + + self.resource_group = None + self.gallery_name = None + self.gallery_image_name = None + self.name = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-03-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + self.mgmt_client = None + super(AzureRMGalleryImageVersionsInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if (self.resource_group is not None and + self.gallery_name is not None and + self.gallery_image_name is not None and + self.name is not None): + self.results['versions'] = self.get() + elif (self.resource_group is not None and + self.gallery_name is not None and + self.gallery_image_name is not None): + self.results['versions'] = self.listbygalleryimage() + return self.results + + def get(self): + response = 
None + results = {} + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}' + + '/images' + + '/{{ image_name }}' + + '/versions' + + '/{{ version_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) + self.url = self.url.replace('{{ image_name }}', self.gallery_image_name) + self.url = self.url.replace('{{ version_name }}', self.name) + + try: + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30) + results = json.loads(response.text) + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return self.format_item(results) + + def listbygalleryimage(self): + response = None + results = dict( + response=[] + ) + # prepare url + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/galleries' + + '/{{ gallery_name }}' + + '/images' + + '/{{ image_name }}' + + '/versions') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ gallery_name }}', self.gallery_name) + self.url = self.url.replace('{{ image_name }}', self.gallery_image_name) + + try: + skiptoken = None + + while True: + if skiptoken: + self.query_parameters['skiptoken'] = skiptoken + + response = self.mgmt_client.query(self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + [200, 404], + 0, + 0) + try: + response = json.loads(response.text) + 
if isinstance(response, dict): + if response.get('value'): + results['response'] = results['response'] + response['value'] + skiptoken = response.get('nextLink') + else: + results['response'] = results['response'] + [response] + except Exception as e: + self.fail('Failed to parse response: ' + str(e)) + if not skiptoken: + break + # self.log('Response : {0}'.format(response)) + except CloudError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return [self.format_item(x) for x in results['response']] if results['response'] else [] + + def format_item(self, item): + d = { + 'id': item['id'], + 'name': item['name'], + 'location': item['location'], + 'tags': item.get('tags'), + 'publishing_profile': item['properties']['publishingProfile'], + 'provisioning_state': item['properties']['provisioningState'] + } + return d + + +def main(): + AzureRMGalleryImageVersionsInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster.py new file mode 100644 index 000000000..ce498df22 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster.py @@ -0,0 +1,552 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_hdinsightcluster +version_added: "0.1.2" +short_description: Manage Azure HDInsight Cluster instance +description: + - Create, update and delete instance of Azure HDInsight Cluster. + +options: + resource_group: + description: + - The name of the resource group. + required: True + name: + description: + - The name of the cluster. 
+ required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + cluster_version: + description: + - The version of the cluster. For example C(3.6). + os_type: + description: + - The type of operating system. + choices: + - 'linux' + tier: + description: + - The cluster tier. + choices: + - 'standard' + - 'premium' + cluster_definition: + description: + - The cluster definition. + suboptions: + kind: + description: + - The type of cluster. + choices: + - hadoop + - spark + - hbase + - storm + gateway_rest_username: + description: + - Gateway REST user name. + gateway_rest_password: + description: + - Gateway REST password. + compute_profile_roles: + description: + - The list of roles in the cluster. + type: list + suboptions: + name: + description: + - The name of the role. + choices: + - 'headnode' + - 'workernode' + - 'zookepernode' + min_instance_count: + description: + - The minimum instance count of the cluster. + target_instance_count: + description: + - The instance count of the cluster. + vm_size: + description: + - The size of the VM. + linux_profile: + description: + - The Linux OS profile. + suboptions: + username: + description: + - SSH user name. + password: + description: + - SSH password. + storage_accounts: + description: + - The list of storage accounts in the cluster. + type: list + suboptions: + name: + description: + - Blob storage endpoint. For example storage_account_name.blob.core.windows.net. + is_default: + description: + - Whether or not the storage account is the default storage account. + container: + description: + - The container in the storage account. + key: + description: + - The storage account access key. + state: + description: + - Assert the state of the cluster. + - Use C(present) to create or update a cluster and C(absent) to delete it. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create instance of HDInsight Cluster + azure_rm_hdinsightcluster: + resource_group: myResourceGroup + name: myCluster + location: eastus2 + cluster_version: 3.6 + os_type: linux + tier: standard + cluster_definition: + kind: spark + gateway_rest_username: http-user + gateway_rest_password: MuABCPassword!!@123 + storage_accounts: + - name: myStorageAccount.blob.core.windows.net + is_default: yes + container: myContainer + key: GExmaxH4lDNdHA9nwAsCt8t4AOQas2y9vXQP1kKALTram7Q3/5xLVIab3+nYG1x63Xyak9/VXxQyNBHA9pDWw== + compute_profile_roles: + - name: headnode + target_instance_count: 2 + hardware_profile: + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: MuABCPassword!!@123 + - name: workernode + target_instance_count: 2 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: MuABCPassword!!@123 +''' + +RETURN = ''' +id: + description: + - Fully qualified resource id of the cluster. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.mgmt.hdinsight import HDInsightManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMClusters(AzureRMModuleBase): + """Configuration class for an Azure RM Cluster resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + cluster_version=dict( + type='str' + ), + os_type=dict( + type='str', + choices=['linux'] + ), + tier=dict( + type='str', + choices=['standard', + 'premium'] + ), + cluster_definition=dict( + type='dict' + ), + compute_profile_roles=dict( + type='list' + ), + storage_accounts=dict( + type='list' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + self.tags_changed = False + self.new_instance_count = None + + super(AzureRMClusters, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif 
kwargs[key] is not None: + self.parameters[key] = kwargs[key] + + dict_expand(self.parameters, ['cluster_version'], 'properties') + dict_camelize(self.parameters, ['os_type'], True) + dict_expand(self.parameters, ['os_type'], 'properties') + dict_camelize(self.parameters, ['tier'], True) + dict_expand(self.parameters, ['tier'], 'properties') + + dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_username'], 'restAuthCredential.username') + dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_password'], 'restAuthCredential.password') + dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.username'], 'gateway') + dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.password'], 'gateway') + dict_expand(self.parameters, ['cluster_definition', 'gateway'], 'configurations') + + dict_expand(self.parameters, ['cluster_definition'], 'properties') + dict_expand(self.parameters, ['compute_profile_roles', 'vm_size'], 'hardware_profile') + dict_rename(self.parameters, ['compute_profile_roles', 'linux_profile'], 'linux_operating_system_profile') + dict_expand(self.parameters, ['compute_profile_roles', 'linux_operating_system_profile'], 'os_profile') + dict_rename(self.parameters, ['compute_profile_roles'], 'roles') + dict_expand(self.parameters, ['roles'], 'compute_profile') + dict_expand(self.parameters, ['compute_profile'], 'properties') + dict_rename(self.parameters, ['storage_accounts'], 'storageaccounts') + dict_expand(self.parameters, ['storageaccounts'], 'storage_profile') + dict_expand(self.parameters, ['storage_profile'], 'properties') + + response = None + + self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = 
self.get_cluster() + + if not old_response: + self.log("Cluster instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Cluster instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + compare_result = {} + if (not default_compare(self.parameters, old_response, '', compare_result)): + if compare_result.pop('/properties/compute_profile/roles/*/target_instance_count', False): + # check if it's workernode + new_count = 0 + old_count = 0 + for role in self.parameters['properties']['compute_profile']['roles']: + if role['name'] == 'workernode': + new_count = role['target_instance_count'] + for role in old_response['properties']['compute_profile']['roles']: + if role['name'] == 'workernode': + old_count = role['target_instance_count'] + if old_count != new_count: + self.new_instance_count = new_count + self.to_do = Actions.Update + if compare_result.pop('/tags', False): + self.to_do = Actions.Update + self.tags_changed = True + if compare_result: + for k in compare_result.keys(): + self.module.warn("property '" + k + "' cannot be updated (" + compare_result[k] + ")") + self.module.warn("only tags and target_instance_count can be updated") + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Cluster instance") + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_cluster() + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Cluster instance deleted") + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_cluster() + else: + self.log("Cluster instance unchanged") + self.results['changed'] = False + response = old_response + + if self.state == 'present': + self.results.update(self.format_item(response)) + return self.results + + def 
create_update_cluster(self): + ''' + Creates or updates Cluster with the specified configuration. + + :return: deserialized Cluster instance state dictionary + ''' + self.log("Creating / Updating the Cluster instance {0}".format(self.name)) + + try: + if self.to_do == Actions.Create: + response = self.mgmt_client.clusters.begin_create(resource_group_name=self.resource_group, + cluster_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + else: + if self.tags_changed: + response = self.mgmt_client.clusters.update(resource_group_name=self.resource_group, + cluster_name=self.name, + parameters={'tags': self.parameters.get('tags')}) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + if self.new_instance_count: + response = self.mgmt_client.clusters.begin_resize(resource_group_name=self.resource_group, + cluster_name=self.name, + role_name='workernode', + parameters={'target_instance_count': self.new_instance_count}) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error creating or updating Cluster instance: {0}".format(str(exc))) + return response.as_dict() if response else {} + + def delete_cluster(self): + ''' + Deletes specified Cluster instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Cluster instance {0}".format(self.name)) + try: + response = self.mgmt_client.clusters.begin_delete(resource_group_name=self.resource_group, + cluster_name=self.name) + except Exception as e: + self.fail("Error deleting the Cluster instance: {0}".format(str(e))) + + return True + + def get_cluster(self): + ''' + Gets the properties of the specified Cluster. 
+
+        :return: deserialized Cluster instance state dictionary
+        '''
+        self.log("Checking if the Cluster instance {0} is present".format(self.name))
+        found = False
+        try:
+            response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group,
+                                                     cluster_name=self.name)
+            found = True
+            self.log("Response : {0}".format(response))
+            self.log("Cluster instance : {0} found".format(response.name))
+        except ResourceNotFoundError as e:
+            self.log('Did not find the Cluster instance.')
+        if found is True:
+            return response.as_dict()
+
+        return False
+
+    def format_item(self, d):
+        d = {
+            'id': d.get('id', None)
+        }
+        return d
+
+
+def default_compare(new, old, path, result):
+    if new is None:
+        match = True
+    elif isinstance(new, dict):
+        match = True
+        if not isinstance(old, dict):
+            result[path] = 'old dict is null'
+            match = False
+        else:
+            for k in new.keys():
+                if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
+                    match = False
+    elif isinstance(new, list):
+        if not isinstance(old, list) or len(new) != len(old):
+            result[path] = 'length is different or null'
+            match = False
+        elif len(old) == 0:
+            match = True
+        else:
+            match = True
+            if isinstance(old[0], dict):
+                key = None
+                if 'id' in old[0] and 'id' in new[0]:
+                    key = 'id'
+                elif 'name' in old[0] and 'name' in new[0]:
+                    key = 'name'
+                else:
+                    key = list(old[0])[0]
+                new = sorted(new, key=lambda x: x.get(key, ''))
+                old = sorted(old, key=lambda x: x.get(key, ''))
+            else:
+                new = sorted(new)
+                old = sorted(old)
+            for i in range(len(new)):
+                if not default_compare(new[i], old[i], path + '/*', result):
+                    match = False
+        return match
+    else:
+        if path.endswith('password'):
+            match = True
+        else:
+            if path == '/location' or path.endswith('location_name'):
+                new = new.replace(' ', '').lower()
+                old = old.replace(' ', '').lower()  # fix: was new.replace(...), which made locations always compare equal
+            if new == old:
+                match = True
+            else:
+                result[path] = str(new) + ' != ' + str(old)
+                match = False
+        return match
+
+
+def dict_camelize(d, path, 
camelize_first): + if isinstance(d, list): + for i in range(len(d)): + dict_camelize(d[i], path, camelize_first) + elif isinstance(d, dict): + if len(path) == 1: + old_value = d.get(path[0], None) + if old_value is not None: + d[path[0]] = _snake_to_camel(old_value, camelize_first) + else: + sd = d.get(path[0], None) + if sd is not None: + dict_camelize(sd, path[1:], camelize_first) + + +def dict_upper(d, path): + if isinstance(d, list): + for i in range(len(d)): + dict_upper(d[i], path) + elif isinstance(d, dict): + if len(path) == 1: + old_value = d.get(path[0], None) + if old_value is not None: + d[path[0]] = old_value.upper() + else: + sd = d.get(path[0], None) + if sd is not None: + dict_upper(sd, path[1:]) + + +def dict_rename(d, path, new_name): + if isinstance(d, list): + for i in range(len(d)): + dict_rename(d[i], path, new_name) + elif isinstance(d, dict): + if len(path) == 1: + old_value = d.pop(path[0], None) + if old_value is not None: + d[new_name] = old_value + else: + sd = d.get(path[0], None) + if sd is not None: + dict_rename(sd, path[1:], new_name) + + +def dict_expand(d, path, outer_dict_name): + if isinstance(d, list): + for i in range(len(d)): + dict_expand(d[i], path, outer_dict_name) + elif isinstance(d, dict): + if len(path) == 1: + old_value = d.pop(path[0], None) + if old_value is not None: + d[outer_dict_name] = d.get(outer_dict_name, {}) + d[outer_dict_name][path[0]] = old_value + else: + sd = d.get(path[0], None) + if sd is not None: + dict_expand(sd, path[1:], outer_dict_name) + + +def _snake_to_camel(snake, capitalize_first=False): + if capitalize_first: + return ''.join(x.capitalize() or '_' for x in snake.split('_')) + else: + return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:]) + + +def main(): + """Main execution""" + AzureRMClusters() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster_info.py new file mode 100644 index 000000000..09dce3ee3 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hdinsightcluster_info.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_hdinsightcluster_info +version_added: "0.1.2" +short_description: Get Azure HDInsight Cluster facts +description: + - Get facts of Azure HDInsight Cluster. + +options: + resource_group: + description: + - Name of an Azure resource group. + name: + description: + - HDInsight cluster name. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of HDInsight Cluster + azure_rm_hdinsightcluster_info: + resource_group: myResourceGroup + name: myCluster + + - name: List instances of HDInsight Cluster + azure_rm_hdinsightcluster_info: + resource_group: myResourceGroup + tags: + - key:value +''' + +RETURN = ''' +clusters: + description: + - A list of dictionaries containing facts for HDInsight Cluster. + returned: always + type: complex + contains: + id: + description: + - The unique resource identifier of the HDInsight Cluster. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster" + resource_group: + description: + - Name of an Azure resource group. 
+ returned: always + type: str + sample: myResourceGroup + name: + description: + - The name of the HDInsight Cluster. + returned: always + type: str + sample: testaccount + location: + description: + - The location of the resource group to which the resource belongs. + returned: always + type: str + sample: westus + cluster_version: + description: + - The version of the cluster. + returned: always + type: str + sample: 3.6.1000.67 + os_type: + description: + - The type of operating system. + returned: always + type: str + sample: linux + tier: + description: + - The cluster tier. + returned: always + type: str + sample: standard + cluster_definition: + description: + - The cluster definition. + contains: + kind: + description: + - The type of cluster. + returned: always + type: str + sample: spark + compute_profile_roles: + description: + - The list of roles in the cluster. + type: list + contains: + name: + description: + - The name of the role. + returned: always + type: str + sample: headnode + target_instance_count: + description: + - The instance count of the cluster. + returned: always + type: int + sample: 2 + vm_size: + description: + - The size of the VM. + returned: always + type: str + sample: Standard_D3 + linux_profile: + description: + - The Linux OS profile. + contains: + username: + description: + - User name. + returned: always + type: str + sample: myuser + connectivity_endpoints: + description: + - Cluster's connectivity endpoints. + type: list + contains: + location: + description: + - Endpoint location. + returned: always + type: str + sample: myCluster-ssh.azurehdinsight.net + name: + description: + - Endpoint name. + returned: always + type: str + sample: SSH + port: + description: + - Endpoint port. + returned: always + type: int + sample: 22 + protocol: + description: + - Endpoint protocol. + returned: always + type: str + sample: TCP + tags: + description: + - The tags of the resource. 
+ returned: always + type: complex + sample: {} +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.hdinsight import HDInsightManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMHDInsightclusterInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.mgmt_client = None + self.resource_group = None + self.name = None + self.tags = None + + super(AzureRMHDInsightclusterInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_hdinsightcluster_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_hdinsightcluster_facts' module has been renamed to 'azure_rm_hdinsightcluster_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient, + is_track2=True, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.name is not None: + self.results['clusters'] = self.get() + elif self.resource_group is not None: + self.results['clusters'] = self.list_by_resource_group() + else: + self.results['clusters'] = self.list_all() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group, + cluster_name=self.name) + 
self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for HDInsight Cluster.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.mgmt_client.clusters.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for HDInsight Cluster.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def list_all(self): + response = None + results = [] + try: + response = self.mgmt_client.clusters.list() + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for HDInsight Cluster.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'id': d.get('id'), + 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'), + 'name': d.get('name', None), + 'location': d.get('location', '').replace(' ', '').lower(), + + 'cluster_version': d.get('properties', {}).get('cluster_version'), + 'os_type': d.get('properties', {}).get('os_type'), + 'tier': d.get('properties', {}).get('tier'), + 'cluster_definition': { + 'kind': d.get('properties', {}).get('cluster_definition', {}).get('kind') + }, + 'compute_profile_roles': [{ + 'name': item.get('name'), + 'target_instance_count': item.get('target_instance_count'), + 'vm_size': item.get('hardware_profile', {}).get('vm_size'), + 'linux_profile': { + 'username': item.get('os_profile', {}).get('linux_operating_system_profile', {}).get('username') + } + } for item in 
d.get('properties', {}).get('compute_profile', {}).get('roles', [])],
+            'connectivity_endpoints': d.get('properties', {}).get('connectivity_endpoints'),
+            'tags': d.get('tags', None)
+        }
+
+        return d
+
+
+def main():
+    AzureRMHDInsightclusterInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup.py
new file mode 100644
index 000000000..d7e80bf14
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2021 Aparna Patil(@aparna-patil)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: azure_rm_hostgroup
+
+version_added: "1.10.0"
+
+short_description: Create, delete and update a dedicated host group
+
+description:
+    - Creates, deletes, and updates a dedicated host group.
+
+options:
+    resource_group:
+        description:
+            - Name of resource group.
+        required: true
+        type: str
+    name:
+        description:
+            - The name of the dedicated host group.
+        required: true
+        type: str
+    location:
+        description:
+            - Valid Azure location for host group. Defaults to location of resource group.
+        type: str
+    platform_fault_domain_count:
+        description:
+            - Number of fault domains that the host group can span.
+        type: int
+    zones:
+        description:
+            - Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only
+              during creation. If not provided, the group supports all zones in the region.
+        type: list
+        elements: str
+    state:
+        description:
+            - Assert the state of the host group. Use C(present) to create or update and C(absent) to delete.
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@aparna-patil) +''' + +EXAMPLES = ''' +- name: Create a host group + azure_rm_hostgroup: + resource_group: myAzureResourceGroup + name: myhostgroup + location: eastus + zones: + - "1" + platform_fault_domain_count: 1 + state: present + +- name: Update a host group + azure_rm_hostgroup: + resource_group: myAzureResourceGroup + name: myhostgroup + location: eastus + zones: + - "1" + platform_fault_domain_count: 1 + state: present + tags: + key1: "value1" + +- name: Delete a host group + azure_rm_hostgroup: + resource_group: myAzureResourceGroup + name: myhostgroup + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the host group. + returned: always + type: complex + contains: + id: + description: + - The host group ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Compute/hostGroups/myhostgroup" + name: + description: + - The host group name. + returned: always + type: str + sample: 'myhostgroup' + location: + description: + - The Azure Region where the resource lives. + returned: always + type: str + sample: eastus + platform_fault_domain_count: + description: + - Number of fault domains. + returned: always + type: int + sample: 1 + zones: + description: + - Availability zones configured for this host group. + returned: always + type: list + sample: ["1"] + tags: + description: + - Resource tags. + returned: always + type: list + sample: [{"key1": "value1"}] + type: + description: + - The type of resource. 
+ returned: always + type: str + sample: Microsoft.Compute/hostGroups +''' + +from ansible.module_utils.basic import _load_params +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE, \ + normalize_location_name + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMHostGroup(AzureRMModuleBase): + + def __init__(self): + + _load_params() + # define user inputs from playbook + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + platform_fault_domain_count=dict(type='int'), + zones=dict(type='list', elements='str'), + state=dict(choices=['present', 'absent'], default='present', type='str') + ) + + required_if = [ + ('state', 'present', ['platform_fault_domain_count']) + ] + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.tags = None + self.platform_fault_domain_count = None + self.zones = None + + super(AzureRMHostGroup, self).__init__(self.module_arg_spec, + required_if=required_if, + supports_check_mode=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + results = dict() + host_group = None + + # retrieve resource group to make sure it exists + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + self.location = normalize_location_name(self.location) + + try: + self.log('Fetching host group {0}'.format(self.name)) + host_group = self.compute_client.dedicated_host_groups.get(self.resource_group, self.name) + # serialize object into a dictionary + results = self.hostgroup_to_dict(host_group) + if 
self.state == 'present': + changed = False + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + self.tags = results['tags'] + if self.platform_fault_domain_count != results['platform_fault_domain_count']: + self.fail("Error updating host group : {0}. Changing platform_fault_domain_count is not allowed." + .format(self.name)) + if self.zones: + if ('zones' in results and self.zones[0] != results['zones'][0]) or 'zones' not in results: + self.fail("Error updating host group : {0}. Changing property zones is not allowed." + .format(self.name)) + elif self.state == 'absent': + changed = True + + except ResourceNotFoundError: + if self.state == 'present': + changed = True + else: + changed = False + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + # create or update a dedicated host group + host_group_new = \ + self.compute_models.DedicatedHostGroup(location=self.location, + platform_fault_domain_count=self.platform_fault_domain_count, + zones=self.zones) + if self.tags: + host_group_new.tags = self.tags + self.results['state'] = self.create_or_update_hostgroup(host_group_new) + + elif self.state == 'absent': + # delete a host group + self.delete_hostgroup() + self.results['state'] = 'Deleted' + + return self.results + + def create_or_update_hostgroup(self, host_group): + try: + # create the host group + response = self.compute_client.dedicated_host_groups.create_or_update( + resource_group_name=self.resource_group, + host_group_name=self.name, + parameters=host_group) + except Exception as exc: + self.fail("Error creating or updating host group {0} - {1}".format(self.name, str(exc))) + return self.hostgroup_to_dict(response) + + def delete_hostgroup(self): + try: + # delete the host group + response = self.compute_client.dedicated_host_groups.delete(resource_group_name=self.resource_group, + 
host_group_name=self.name) + except Exception as exc: + self.fail("Error deleting host group {0} - {1}".format(self.name, str(exc))) + return response + + def hostgroup_to_dict(self, hostgroup): + result = hostgroup.as_dict() + result['tags'] = hostgroup.tags + return result + + +def main(): + AzureRMHostGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup_info.py new file mode 100644 index 000000000..3f6ce2321 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_hostgroup_info.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@aparna-patil) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_hostgroup_info + +version_added: "1.10.0" + +short_description: Get host group facts + +description: + - Get facts for specified dedicated host group or all host groups in a given resource group. + +options: + resource_group: + description: + - Name of the resource group. + type: str + name: + description: + - Name of the host group. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Aparna Patil (@aparna-patil) + +''' + +EXAMPLES = ''' +- name: Get facts for one host group + azure_rm_hostgroup_info: + resource_group: myAzureResourceGroup + name: myhostgroup + +- name: Get facts for all host groups in resource group + azure_rm_hostgroup_info: + resource_group: myAzureResourceGroup +''' + +RETURN = ''' +hostgroups: + description: + - Gets a list of dedicated host groups. 
+ returned: always + type: list + elements: dict + sample: [ + { + "hosts": null, + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Compute/hostGroups/myhostgroup", + "location": "eastus", + "name": "myhostgroup", + "platform_fault_domain_count": 1, + "tags": { + "key1": "value1" + }, + "zones": [ + "1" + ] + } + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'HostGroup' + + +class AzureRMHostGroupInfo(AzureRMModuleBase): + + def __init__(self): + + # define user inputs into argument + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + # store the results of the module operation + self.results = dict( + changed=False + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMHostGroupInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + results = [] + # list the conditions and results to return based on user input + if self.name is not None: + # if there is a host group name provided, return facts about that dedicated host group + results = self.get_item() + elif self.resource_group: + # all the host groups listed in specific resource group + results = self.list_resource_group() + else: + # all the host groups in a subscription + results = self.list_items() + + self.results['hostgroups'] = self.curated_items(results) + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + results = [] + # get specific host group + try: + item = 
self.compute_client.dedicated_host_groups.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + # serialize result + if item and self.has_tags(item.tags, self.tags): + results = [item] + return results + + def list_resource_group(self): + self.log('List all host groups for resource group - {0}'.format(self.resource_group)) + try: + response = self.compute_client.dedicated_host_groups.list_by_resource_group(self.resource_group) + except ResourceNotFoundError as exc: + self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def list_items(self): + self.log('List all host groups for a subscription ') + try: + response = self.compute_client.dedicated_host_groups.list_by_subscription() + except ResourceNotFoundError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def curated_items(self, raws): + return [self.hostgroup_to_dict(item) for item in raws] if raws else [] + + def hostgroup_to_dict(self, hostgroup): + result = dict( + id=hostgroup.id, + name=hostgroup.name, + location=hostgroup.location, + tags=hostgroup.tags, + platform_fault_domain_count=hostgroup.platform_fault_domain_count, + zones=hostgroup.zones, + hosts=[dict(id=x.id) for x in hostgroup.hosts] if hostgroup.hosts else None + ) + return result + + +def main(): + AzureRMHostGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_image.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_image.py new file mode 100644 index 000000000..8c4fb4475 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_image.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# +# 
Copyright (c) 2017 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_image +version_added: "0.0.1" +short_description: Manage Azure image +description: + - Create, delete an image from virtual machine, blob uri, managed disk or snapshot. +options: + resource_group: + description: + - Name of resource group. + required: true + type: str + name: + description: + - Name of the image. + required: true + type: str + source: + description: + - OS disk source from the same region. + - It can be a virtual machine, OS disk blob URI, managed OS disk, or OS snapshot. + - Each type of source except for blob URI can be given as resource id, name or a dict contains C(resource_group), C(name) and C(type). + - If source type is blob URI, the source should be the full URI of the blob in string type. + - If you specify the I(type) in a dict, acceptable value contains C(disks), C(virtual_machines) and C(snapshots). + type: raw + data_disk_sources: + description: + - List of data disk sources, including unmanaged blob URI, managed disk id or name, or snapshot id or name. + type: list + elements: str + location: + description: + - Location of the image. Derived from I(resource_group) if not specified. + type: str + os_type: + description: The OS type of image. + choices: + - Windows + - Linux + type: str + hyper_v_generation: + description: + - Specifies the HyperVGenerationType of the VirtualMachine created from the image. + type: str + choices: + - V1 + - V2 + state: + description: + - Assert the state of the image. Use C(present) to create or update a image and C(absent) to delete an image. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create an image from a virtual machine + azure_rm_image: + resource_group: myResourceGroup + name: myImage + source: myVirtualMachine + +- name: Create an image from os disk + azure_rm_image: + resource_group: myResourceGroup + name: myImage + source: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/disks/disk001 + data_disk_sources: + - datadisk001 + - datadisk002 + os_type: Linux + +- name: Create an image from os disk via dict + azure_rm_image: + resource_group: myResourceGroup + name: myImage + source: + type: disks + resource_group: myResourceGroup + name: disk001 + data_disk_sources: + - datadisk001 + - datadisk002 + os_type: Linux + +- name: Delete an image + azure_rm_image: + state: absent + resource_group: myResourceGroup + name: myImage + source: testvm001 +''' + +RETURN = ''' +id: + description: + - Image resource path. 
+ type: str + returned: success + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/images/myImage" +''' # NOQA + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id + +try: + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMImage(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + source=dict(type='raw'), + data_disk_sources=dict(type='list', elements='str', default=[]), + os_type=dict(type='str', choices=['Windows', 'Linux']), + hyper_v_generation=dict(type='str', choices=['V1', 'V2']) + ) + + self.results = dict( + changed=False, + id=None + ) + + required_if = [ + ('state', 'present', ['source']) + ] + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.source = None + self.data_disk_sources = None + self.os_type = None + self.hyper_v_generation = None + + super(AzureRMImage, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + results = None + changed = False + image = None + + if not self.location: + # Set default location + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + + self.log('Fetching image {0}'.format(self.name)) + image = self.get_image() + if image: + self.check_provisioning_state(image, self.state) + results = image.id + # update is not supported except for tags + update_tags, 
tags = self.update_tags(image.tags) + if update_tags: + changed = True + self.tags = tags + if self.hyper_v_generation and self.hyper_v_generation != image.hyper_v_generation: + self.log("Compare configure Check whether hyper_v_generation needs to be updated") + self.fail("The hyper_v_generation parameter cannot be updated to {0}".format(self.hyper_v_generation)) + else: + self.hyper_v_generation = image.hyper_v_generation + if self.state == 'absent': + changed = True + # the image does not exist and create a new one + elif self.state == 'present': + changed = True + + self.results['changed'] = changed + self.results['id'] = results + + if changed: + if self.state == 'present': + image_instance = None + # create from virtual machine + vm = self.get_source_vm() + if vm: + if self.data_disk_sources: + self.fail('data_disk_sources is not allowed when capturing image from vm') + image_instance = self.image_models.Image(location=self.location, + source_virtual_machine=self.image_models.SubResource(id=vm.id), + hyper_v_generation=self.hyper_v_generation, + tags=self.tags) + else: + if not self.os_type: + self.fail('os_type is required to create the image') + os_disk = self.create_os_disk() + data_disks = self.create_data_disks() + storage_profile = self.image_models.ImageStorageProfile(os_disk=os_disk, data_disks=data_disks) + image_instance = self.image_models.Image( + location=self.location, + storage_profile=storage_profile, + hyper_v_generation=self.hyper_v_generation, + tags=self.tags + ) + + # finally make the change if not check mode + if not self.check_mode and image_instance: + new_image = self.create_image(image_instance) + self.results['id'] = new_image.id + + elif self.state == 'absent': + if not self.check_mode: + # delete image + self.delete_image() + # the delete does not actually return anything. if no exception, then we'll assume it worked. 
+ self.results['id'] = None + + return self.results + + def resolve_storage_source(self, source): + blob_uri = None + disk = None + snapshot = None + # blob URI can only be given by str + if isinstance(source, str) and source.lower().endswith('.vhd'): + blob_uri = source + return (blob_uri, disk, snapshot) + + tokenize = dict() + if isinstance(source, dict): + tokenize = source + elif isinstance(source, str): + tokenize = parse_resource_id(source) + else: + self.fail("source parameter should be in type string or dictionary") + if tokenize.get('type') == 'disks': + disk = format_resource_id(tokenize['name'], + tokenize.get('subscription_id') or self.subscription_id, + 'Microsoft.Compute', + 'disks', + tokenize.get('resource_group') or self.resource_group) + return (blob_uri, disk, snapshot) + + if tokenize.get('type') == 'snapshots': + snapshot = format_resource_id(tokenize['name'], + tokenize.get('subscription_id') or self.subscription_id, + 'Microsoft.Compute', + 'snapshots', + tokenize.get('resource_group') or self.resource_group) + return (blob_uri, disk, snapshot) + + # not a disk or snapshots + if 'type' in tokenize: + return (blob_uri, disk, snapshot) + + # source can be name of snapshot or disk + snapshot_instance = self.get_snapshot(tokenize.get('resource_group') or self.resource_group, + tokenize['name']) + if snapshot_instance: + snapshot = snapshot_instance.id + return (blob_uri, disk, snapshot) + + disk_instance = self.get_disk(tokenize.get('resource_group') or self.resource_group, + tokenize['name']) + if disk_instance: + disk = disk_instance.id + return (blob_uri, disk, snapshot) + + def create_os_disk(self): + blob_uri, disk, snapshot = self.resolve_storage_source(self.source) + snapshot_resource = self.image_models.SubResource(id=snapshot) if snapshot else None + managed_disk = self.image_models.SubResource(id=disk) if disk else None + return self.image_models.ImageOSDisk(os_type=self.os_type, + 
os_state=self.image_models.OperatingSystemStateTypes.generalized, + snapshot=snapshot_resource, + managed_disk=managed_disk, + blob_uri=blob_uri) + + def create_data_disk(self, lun, source): + blob_uri, disk, snapshot = self.resolve_storage_source(source) + if blob_uri or disk or snapshot: + snapshot_resource = self.image_models.SubResource(id=snapshot) if snapshot else None + managed_disk = self.image_models.SubResource(id=disk) if disk else None + return self.image_models.ImageDataDisk(lun=lun, + blob_uri=blob_uri, + snapshot=snapshot_resource, + managed_disk=managed_disk) + + def create_data_disks(self): + return list(filter(None, [self.create_data_disk(lun, source) for lun, source in enumerate(self.data_disk_sources)])) + + def get_source_vm(self): + # self.resource can be a vm (id/name/dict), or not a vm. return the vm iff it is an existing vm. + resource = dict() + if isinstance(self.source, dict): + if self.source.get('type') != 'virtual_machines': + return None + resource = dict(type='virtualMachines', + name=self.source['name'], + resource_group=self.source.get('resource_group') or self.resource_group) + elif isinstance(self.source, str): + vm_resource_id = format_resource_id(self.source, + self.subscription_id, + 'Microsoft.Compute', + 'virtualMachines', + self.resource_group) + resource = parse_resource_id(vm_resource_id) + else: + self.fail("Unsupported type of source parameter, please give string or dictionary") + return self.get_vm(resource['resource_group'], resource['name']) if resource['type'] == 'virtualMachines' else None + + def get_snapshot(self, resource_group, snapshot_name): + return self._get_resource(self.image_client.snapshots.get, resource_group, snapshot_name) + + def get_disk(self, resource_group, disk_name): + return self._get_resource(self.image_client.disks.get, resource_group, disk_name) + + def get_vm(self, resource_group, vm_name): + return self._get_resource(self.image_client.virtual_machines.get, resource_group, vm_name, 
'instanceview') + + def get_image(self): + return self._get_resource(self.image_client.images.get, self.resource_group, self.name) + + def _get_resource(self, get_method, resource_group, name, expand=None): + try: + if expand: + return get_method(resource_group, name, expand=expand) + else: + return get_method(resource_group, name) + except ResourceNotFoundError as cloud_err: + # Return None iff the resource is not found + if cloud_err.status_code == 404: + self.log('{0}'.format(str(cloud_err))) + return None + self.fail('Error: failed to get resource {0} - {1}'.format(name, str(cloud_err))) + + def create_image(self, image): + try: + poller = self.image_client.images.begin_create_or_update(self.resource_group, self.name, image) + new_image = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating image {0} - {1}".format(self.name, str(exc))) + self.check_provisioning_state(new_image) + return new_image + + def delete_image(self): + self.log('Deleting image {0}'.format(self.name)) + try: + poller = self.image_client.images.begin_delete(self.resource_group, self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting image {0} - {1}".format(self.name, str(exc))) + + return result + + +def main(): + AzureRMImage() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_image_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_image_info.py new file mode 100644 index 000000000..e1320f0ce --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_image_info.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_image_info + +version_added: "0.0.1" + +short_description: 
Get facts about azure custom images + +description: + - List azure custom images. The images can be listed where scope of listing can be based on subscription, resource group, name or tags. + +options: + resource_group: + description: + - Name of resource group. + type: str + name: + description: + - Name of the image to filter from existing images. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Madhura Naniwadekar (@Madhura-CSI) +''' + + +EXAMPLES = ''' +- name: List images with name + azure_rm_image_info: + name: test-image + resource_group: myResourceGroup + +- name: List images by resource group + azure_rm_image_info: + resource_group: myResourceGroup + tags: + - testing + - foo:bar + +- name: List all available images under current subscription + azure_rm_image_info: +''' + + +RETURN = ''' +images: + description: + - List of image dicts. + returned: always + type: complex + contains: + id: + description: + - Id of the image. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/xx + name: + description: + - Name of the image. + returned: always + type: str + sample: foo + resource_group: + description: + - Resource group of the image. + returned: always + type: str + sample: myResourceGroup + location: + description: + - Location of the image. + returned: always + type: str + os_disk: + description: + - Id of os disk for image. + type: str + returned: always + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx + os_disk_caching: + description: + - Specifies caching requirements for the image. + returned: always + type: str + os_state: + description: + - Specifies image operating system state. 
Possible values are C(Generalized) or C(Specialized). + returned: always + type: str + sample: Generalized + os_storage_account_type: + description: + - Specifies the storage account type for the managed disk. + type: str + returned: always + sample: Standard_LRS + os_type: + description: + - Type of OS for image. + returned: always + type: str + sample: Linux + provisioning_state: + description: + - State of image. + returned: always + type: str + sample: Succeeded + source: + description: + - Resource id of source VM from which the image is created. + type: str + returned: always + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/xx + tags: + description: + - Dictionary of tags associated with the image. + type: dict + returned: always + sample: {"key1":"value1"} + hyper_v_generation: + description: + - The hypervisor generation of the Virtual Machine created from the image. + type: str + returned: always + sample: 'V1' + data_disks: + description: + - List of data disks associated with the image. + type: complex + returned: always + contains: + caching: + description: + - Type of caching of data disk. + type: str + returned: always + sample: read_only + disk_size_gb: + description: + - Specifies the size of empty data disks in gigabytes. + returned: always + type: int + sample: 50 + lun: + description: + - Specifies the logical unit number of the data disk. + returned: always + type: int + sample: 0 + storage_account_type: + description: + - Specifies the storage account type for the managed disk data disk. + type: str + returned: always + sample: Standard_LRS + managed_disk_id: + description: + - Id of managed disk. + type: str + returned: always + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx + blob_uri: + description: + - The virtual hard disk. 
+ type: str + returned: always + sample: null +''' + + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] + + +class AzureRMImageInfo(AzureRMModuleBase): + + def __init__(self, **kwargs): + + self.module_arg_spec = dict( + resource_group=dict(type='str'), + name=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False + ) + + self.resource_group = None + self.name = None + self.format = None + self.tags = None + + super(AzureRMImageInfo, self).__init__( + derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_image_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_image_facts' module has been renamed to 'azure_rm_image_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and self.resource_group: + self.results['images'] = self.get_image(self.resource_group, self.name) + elif self.name and not self.resource_group: + self.results['images'] = self.list_images(self.name) + elif not self.name and self.resource_group: + self.results['images'] = self.list_images_by_resource_group(self.resource_group) + elif not self.name and not self.resource_group: + self.results['images'] = self.list_images() + return self.results + + def get_image(self, resource_group, image_name): + ''' + Returns image details based on its name + ''' + + self.log('Get properties for {0}'.format(self.name)) + + result = [] + item = None + try: + item = self.image_client.images.get(resource_group, image_name) + except ResourceNotFoundError as exc: + self.fail('Failed to list images - 
{0}'.format(str(exc))) + + result = [self.format_item(item)] + return result + + def list_images_by_resource_group(self, resource_group): + ''' + Returns image details based on its resource group + ''' + + self.log('List images filtered by resource group') + response = None + try: + response = self.image_client.images.list_by_resource_group(resource_group) + except ResourceNotFoundError as exc: + self.fail("Failed to list images: {0}".format(str(exc))) + + return [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else [] + + def list_images(self, image_name=None): + ''' + Returns image details in current subscription + ''' + + self.log('List images within current subscription') + response = None + results = [] + try: + response = self.image_client.images.list() + except ResourceNotFoundError as exc: + self.fail("Failed to list all images: {0}".format(str(exc))) + + results = [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else [] + if image_name: + results = [result for result in results if result['name'] == image_name] + return results + + def format_item(self, item): + d = item.as_dict() + + for data_disk in d['storage_profile']['data_disks']: + if 'managed_disk' in data_disk.keys(): + data_disk['managed_disk_id'] = data_disk['managed_disk']['id'] + data_disk.pop('managed_disk', None) + + d = { + 'id': d['id'], + 'resource_group': d['id'].split('/')[4], + 'name': d['name'], + 'location': d['location'], + 'tags': d.get('tags'), + 'source': d['source_virtual_machine']['id'] if 'source_virtual_machine' in d.keys() else None, + 'os_type': d['storage_profile']['os_disk']['os_type'], + 'os_state': d['storage_profile']['os_disk']['os_state'], + 'os_disk_caching': d['storage_profile']['os_disk']['caching'], + 'os_storage_account_type': d['storage_profile']['os_disk']['storage_account_type'], + 'os_disk': d['storage_profile']['os_disk']['managed_disk']['id'] if 'managed_disk' in 
d['storage_profile']['os_disk'].keys() else None, + 'os_blob_uri': d['storage_profile']['os_disk']['blob_uri'] if 'blob_uri' in d['storage_profile']['os_disk'].keys() else None, + 'provisioning_state': d['provisioning_state'], + 'data_disks': d['storage_profile']['data_disks'], + 'hyper_v_generation': d.get('hyper_v_generation') + } + return d + + +def main(): + AzureRMImageInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice.py new file mode 100644 index 000000000..f05198506 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice.py @@ -0,0 +1,463 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_iotdevice +version_added: "0.1.2" +short_description: Manage Azure IoT hub device +description: + - Create, delete an Azure IoT hub device. +options: + hub: + description: + - Name of IoT Hub. + type: str + required: true + hub_policy_name: + description: + - Policy name of the IoT Hub which will be used to query from IoT hub. + - This policy should have 'RegistryWrite, ServiceConnect, DeviceConnect' accesses. You may get 401 error when you lack any of these. + type: str + required: true + hub_policy_key: + description: + - Key of the I(hub_policy_name). + type: str + required: true + name: + description: + - Name of the IoT hub device identity. + type: str + required: true + state: + description: + - State of the IoT hub. Use C(present) to create or update an IoT hub device and C(absent) to delete an IoT hub device. 
+ type: str + default: present + choices: + - absent + - present + auth_method: + description: + - The authorization type an entity is to be created with. + type: str + choices: + - sas + - certificate_authority + - self_signed + default: sas + primary_key: + description: + - Explicit self-signed certificate thumbprint to use for primary key. + - Explicit Shared Private Key to use for primary key. + type: str + aliases: + - primary_thumbprint + secondary_key: + description: + - Explicit self-signed certificate thumbprint to use for secondary key. + - Explicit Shared Private Key to use for secondary key. + type: str + aliases: + - secondary_thumbprint + status: + description: + - Set device status upon creation. + type: bool + edge_enabled: + description: + - Flag indicating edge enablement. + - Not supported in IoT Hub with Basic tier. + type: bool + twin_tags: + description: + - A section that the solution back end can read from and write to. + - Tags are not visible to device apps. + - "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key." + - List is not supported. + - Not supported in IoT Hub with Basic tier. + type: dict + desired: + description: + - Used along with reported properties to synchronize device configuration or conditions. + - "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key." + - List is not supported. + - Not supported in IoT Hub with Basic tier. 
+ type: dict +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create simplest Azure IoT Hub device + azure_rm_iotdevice: + hub: myHub + name: Testing + hub_policy_name: iothubowner + hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + +- name: Create Azure IoT Edge device + azure_rm_iotdevice: + hub: myHub + name: Testing + hub_policy_name: iothubowner + hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + edge_enabled: yes + +- name: Create Azure IoT Hub device with device twin properties and tag + azure_rm_iotdevice: + hub: myHub + name: Testing + hub_policy_name: iothubowner + hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + twin_tags: + location: + country: US + city: Redmond + sensor: humidity + desired: + period: 100 +''' + +RETURN = ''' +device: + description: + - IoT Hub device. + returned: always + type: dict + sample: { + "authentication": { + "symmetricKey": { + "primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + }, + "type": "sas", + "x509Thumbprint": { + "primaryThumbprint": null, + "secondaryThumbprint": null + } + }, + "capabilities": { + "iotEdge": false + }, + "changed": true, + "cloudToDeviceMessageCount": 0, + "connectionState": "Disconnected", + "connectionStateUpdatedTime": "0001-01-01T00:00:00", + "deviceId": "Testing", + "etag": "NzA2NjU2ODc=", + "failed": false, + "generationId": "636903014505613307", + "lastActivityTime": "0001-01-01T00:00:00", + "modules": [ + { + "authentication": { + "symmetricKey": { + "primaryKey": "XXXXXXXXXXXXXXXXXXX", + "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + }, + "type": "sas", + "x509Thumbprint": { + "primaryThumbprint": null, + "secondaryThumbprint": null + } + }, + "cloudToDeviceMessageCount": 0, + "connectionState": "Disconnected", + "connectionStateUpdatedTime": "0001-01-01T00:00:00", + "deviceId": "testdevice", + 
"etag": "MjgxOTE5ODE4", + "generationId": "636903840872788074", + "lastActivityTime": "0001-01-01T00:00:00", + "managedBy": null, + "moduleId": "test" + } + ], + "properties": { + "desired": { + "$metadata": { + "$lastUpdated": "2019-04-10T05:00:46.2702079Z", + "$lastUpdatedVersion": 8, + "period": { + "$lastUpdated": "2019-04-10T05:00:46.2702079Z", + "$lastUpdatedVersion": 8 + } + }, + "$version": 1, + "period": 100 + }, + "reported": { + "$metadata": { + "$lastUpdated": "2019-04-08T06:24:10.5613307Z" + }, + "$version": 1 + } + }, + "status": "enabled", + "statusReason": null, + "statusUpdatedTime": "0001-01-01T00:00:00", + "tags": { + "location": { + "country": "us", + "city": "Redmond" + }, + "sensor": "humidity" + } + } +''' # NOQA + +import json +import copy +import re + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from msrestazure.tools import parse_resource_id + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMIoTDevice(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + hub_policy_name=dict(type='str', required=True), + hub_policy_key=dict(type='str', no_log=True, required=True), + hub=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + status=dict(type='bool'), + edge_enabled=dict(type='bool'), + twin_tags=dict(type='dict'), + desired=dict(type='dict'), + auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'), + primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']), + secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint']) + ) + + self.results = dict( + changed=False, + id=None + ) + + 
self.name = None + self.hub = None + self.hub_policy_key = None + self.hub_policy_name = None + self.state = None + self.status = None + self.edge_enabled = None + self.twin_tags = None + self.desired = None + self.auth_method = None + self.primary_key = None + self.secondary_key = None + + self._base_url = None + self._mgmt_client = None + self.query_parameters = { + 'api-version': '2018-06-30' + } + self.header_parameters = { + 'Content-Type': 'application/json; charset=utf-8', + 'accept-language': 'en-US' + } + super(AzureRMIoTDevice, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys(): + setattr(self, key, kwargs[key]) + + self._base_url = '{0}.azure-devices.net'.format(self.hub) + config = { + 'base_url': self._base_url, + 'key': self.hub_policy_key, + 'policy': self.hub_policy_name + } + self._mgmt_client = self.get_data_svc_client(**config) + + changed = False + + device = self.get_device() + if self.state == 'present': + if not device: + changed = True + auth = {'type': _snake_to_camel(self.auth_method)} + if self.auth_method == 'self_signed': + auth['x509Thumbprint'] = { + 'primaryThumbprint': self.primary_key, + 'secondaryThumbprint': self.secondary_key + } + elif self.auth_method == 'sas': + auth['symmetricKey'] = { + 'primaryKey': self.primary_key, + 'secondaryKey': self.secondary_key + } + device = { + 'deviceId': self.name, + 'capabilities': {'iotEdge': self.edge_enabled or False}, + 'authentication': auth + } + if self.status is not None and not self.status: + device['status'] = 'disabled' + else: + if self.edge_enabled is not None and self.edge_enabled != device['capabilities']['iotEdge']: + changed = True + device['capabilities']['iotEdge'] = self.edge_enabled + if self.status is not None: + status = 'enabled' if self.status else 'disabled' + if status != device['status']: + changed = True + device['status'] = status + if changed and not self.check_mode: + 
device = self.create_or_update_device(device) + twin = self.get_twin() + if twin: + if not twin.get('tags'): + twin['tags'] = dict() + twin_change = False + if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']): + twin_change = True + if self.desired and not self.is_equal(self.desired, twin['properties']['desired']): + twin_change = True + if twin_change and not self.check_mode: + self.update_twin(twin) + changed = changed or twin_change + device['tags'] = twin.get('tags') or dict() + device['properties'] = twin['properties'] + device['modules'] = self.list_device_modules() + elif self.twin_tags or self.desired: + self.fail("Device twin is not supported in IoT Hub with basic tier.") + elif device: + if not self.check_mode: + self.delete_device(device['etag']) + changed = True + device = None + self.results = device or dict() + self.results['changed'] = changed + return self.results + + def is_equal(self, updated, original): + changed = False + if not isinstance(updated, dict): + self.fail('The Property or Tag should be a dict') + for key in updated.keys(): + if re.search(r'[.|$|#|\s]', key): + self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. 
Got '{0}'".format(key)) + original_value = original.get(key) + updated_value = updated[key] + if isinstance(updated_value, dict): + if not isinstance(original_value, dict): + changed = True + original[key] = updated_value + elif not self.is_equal(updated_value, original_value): + changed = True + elif original_value != updated_value: + changed = True + original[key] = updated_value + return not changed + + def create_or_update_device(self, device): + try: + url = '/devices/{0}'.format(self.name) + headers = copy.copy(self.header_parameters) + if device.get('etag'): + headers['If-Match'] = '"{0}"'.format(device['etag']) + request = self._mgmt_client.put(url, self.query_parameters) + response = self._mgmt_client.send(request=request, headers=headers, content=device) + if response.status_code not in [200, 201, 202]: + raise CloudError(response) + return json.loads(response.text) + except Exception as exc: + if exc.status_code in [403] and self.edge_enabled: + self.fail('Edge device is not supported in IoT Hub with Basic tier.') + else: + self.fail('Error when creating or updating IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def delete_device(self, etag): + try: + url = '/devices/{0}'.format(self.name) + headers = copy.copy(self.header_parameters) + headers['If-Match'] = '"{0}"'.format(etag) + request = self._mgmt_client.delete(url, self.query_parameters) + response = self._mgmt_client.send(request=request, headers=headers) + if response.status_code not in [204]: + raise CloudError(response) + except Exception as exc: + self.fail('Error when deleting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def get_device(self): + try: + url = '/devices/{0}'.format(self.name) + device = self._https_get(url, self.query_parameters, self.header_parameters) + return device + except Exception as exc: + if exc.status_code in [404]: + return None + else: + self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, 
exc.message or str(exc))) + + def get_twin(self): + try: + url = '/twins/{0}'.format(self.name) + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception as exc: + if exc.status_code in [403]: + # The Basic sku has nothing to to with twin + return None + else: + self.fail('Error when getting IoT Hub device {0} twin: {1}'.format(self.name, exc.message or str(exc))) + + def update_twin(self, twin): + try: + url = '/twins/{0}'.format(self.name) + headers = copy.copy(self.header_parameters) + headers['If-Match'] = '"{0}"'.format(twin['etag']) + request = self._mgmt_client.patch(url, self.query_parameters) + response = self._mgmt_client.send(request=request, headers=headers, content=twin) + if response.status_code not in [200]: + raise CloudError(response) + return json.loads(response.text) + except Exception as exc: + self.fail('Error when creating or updating IoT Hub device twin {0}: {1}'.format(self.name, exc.message or str(exc))) + + def list_device_modules(self): + try: + url = '/devices/{0}/modules'.format(self.name) + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception as exc: + self.fail('Error when listing IoT Hub device {0} modules: {1}'.format(self.name, exc.message or str(exc))) + + def _https_get(self, url, query_parameters, header_parameters): + request = self._mgmt_client.get(url, query_parameters) + response = self._mgmt_client.send(request=request, headers=header_parameters, content=None) + if response.status_code not in [200]: + raise CloudError(response) + return json.loads(response.text) + + +def main(): + AzureRMIoTDevice() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice_info.py new file mode 100644 index 000000000..ef82bdc17 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevice_info.py @@ -0,0 +1,308 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_iotdevice_info +version_added: "0.1.2" +short_description: Facts of Azure IoT hub device +description: + - Query, get Azure IoT hub device. +options: + hub: + description: + - Name of IoT Hub. + type: str + required: true + hub_policy_name: + description: + - Policy name of the IoT Hub which will be used to query from IoT hub. + - This policy should have at least 'Registry Read' access. + type: str + required: true + hub_policy_key: + description: + - Key of the I(hub_policy_name). + type: str + required: true + name: + description: + - Name of the IoT hub device identity. + type: str + aliases: + - device_id + module_id: + description: + - Name of the IoT hub device module. + - Must use with I(device_id) defined. + type: str + query: + description: + - Query an IoT hub to retrieve information regarding device twins using a SQL-like language. + - "See U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-query-language)." + type: str + top: + description: + - Used when I(name) not defined. + - List the top n devices in the query. 
+ type: int +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' +- name: Get the details of a device + azure_rm_iotdevice_info: + name: Testing + hub: MyIoTHub + hub_policy_name: registryRead + hub_policy_key: XXXXXXXXXXXXXXXXXXXX + +- name: Query all device modules in an IoT Hub + azure_rm_iotdevice_info: + query: "SELECT * FROM devices.modules" + hub: MyIoTHub + hub_policy_name: registryRead + hub_policy_key: XXXXXXXXXXXXXXXXXXXX + +- name: List all devices in an IoT Hub + azure_rm_iotdevice_info: + hub: MyIoTHub + hub_policy_name: registryRead + hub_policy_key: XXXXXXXXXXXXXXXXXXXX +''' + +RETURN = ''' +iot_devices: + description: + - IoT Hub device. + returned: always + type: dict + sample: { + "authentication": { + "symmetricKey": { + "primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + }, + "type": "sas", + "x509Thumbprint": { + "primaryThumbprint": null, + "secondaryThumbprint": null + } + }, + "capabilities": { + "iotEdge": false + }, + "changed": true, + "cloudToDeviceMessageCount": 0, + "connectionState": "Disconnected", + "connectionStateUpdatedTime": "0001-01-01T00:00:00", + "deviceId": "Testing", + "etag": "NzA2NjU2ODc=", + "failed": false, + "generationId": "636903014505613307", + "lastActivityTime": "0001-01-01T00:00:00", + "modules": [ + { + "authentication": { + "symmetricKey": { + "primaryKey": "XXXXXXXXXXXXXXXXXXX", + "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + }, + "type": "sas", + "x509Thumbprint": { + "primaryThumbprint": null, + "secondaryThumbprint": null + } + }, + "cloudToDeviceMessageCount": 0, + "connectionState": "Disconnected", + "connectionStateUpdatedTime": "0001-01-01T00:00:00", + "deviceId": "testdevice", + "etag": "MjgxOTE5ODE4", + "generationId": "636903840872788074", + "lastActivityTime": "0001-01-01T00:00:00", + "managedBy": null, + "moduleId": "test" + } + ], + 
"properties": { + "desired": { + "$metadata": { + "$lastUpdated": "2019-04-10T05:00:46.2702079Z", + "$lastUpdatedVersion": 8, + "period": { + "$lastUpdated": "2019-04-10T05:00:46.2702079Z", + "$lastUpdatedVersion": 8 + } + }, + "$version": 1, + "period": 100 + }, + "reported": { + "$metadata": { + "$lastUpdated": "2019-04-08T06:24:10.5613307Z" + }, + "$version": 1 + } + }, + "status": "enabled", + "statusReason": null, + "statusUpdatedTime": "0001-01-01T00:00:00", + "tags": { + "location": { + "country": "us", + "city": "Redmond" + }, + "sensor": "humidity" + } + } +''' # NOQA + +import json + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake + +try: + from msrestazure.tools import parse_resource_id + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMIoTDeviceFacts(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', aliases=['device_id']), + module_id=dict(type='str'), + query=dict(type='str'), + hub=dict(type='str', required=True), + hub_policy_name=dict(type='str', required=True), + hub_policy_key=dict(type='str', no_log=True, required=True), + top=dict(type='int') + ) + + self.results = dict( + changed=False, + iot_devices=[] + ) + + self.name = None + self.module_id = None + self.hub = None + self.hub_policy_name = None + self.hub_policy_key = None + self.top = None + + self._mgmt_client = None + self._base_url = None + self.query_parameters = { + 'api-version': '2018-06-30' + } + self.header_parameters = { + 'Content-Type': 'application/json; charset=utf-8', + 'accept-language': 'en-US' + } + super(AzureRMIoTDeviceFacts, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in 
self.module_arg_spec.keys(): + setattr(self, key, kwargs[key]) + + self._base_url = '{0}.azure-devices.net'.format(self.hub) + config = { + 'base_url': self._base_url, + 'key': self.hub_policy_key, + 'policy': self.hub_policy_name + } + if self.top: + self.query_parameters['top'] = self.top + self._mgmt_client = self.get_data_svc_client(**config) + + response = [] + if self.module_id: + response = [self.get_device_module()] + elif self.name: + response = [self.get_device()] + elif self.query: + response = self.hub_query() + else: + response = self.list_devices() + + self.results['iot_devices'] = response + return self.results + + def hub_query(self): + try: + url = '/devices/query' + request = self._mgmt_client.post(url, self.query_parameters) + query = { + 'query': self.query + } + response = self._mgmt_client.send(request=request, headers=self.header_parameters, content=query) + if response.status_code not in [200]: + raise CloudError(response) + return json.loads(response.text) + except Exception as exc: + self.fail('Error when running query "{0}" in IoT Hub {1}: {2}'.format(self.query, self.hub, exc.message or str(exc))) + + def get_device(self): + try: + url = '/devices/{0}'.format(self.name) + device = self._https_get(url, self.query_parameters, self.header_parameters) + device['modules'] = self.list_device_modules() + return device + except Exception as exc: + self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def get_device_module(self): + try: + url = '/devices/{0}/modules/{1}'.format(self.name, self.module_id) + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception as exc: + self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def list_device_modules(self): + try: + url = '/devices/{0}/modules'.format(self.name) + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception as exc: + 
self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def list_devices(self): + try: + url = '/devices' + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception as exc: + self.fail('Error when listing IoT Hub devices in {0}: {1}'.format(self.hub, exc.message or str(exc))) + + def _https_get(self, url, query_parameters, header_parameters): + request = self._mgmt_client.get(url, query_parameters) + response = self._mgmt_client.send(request=request, headers=header_parameters, content=None) + if response.status_code not in [200]: + raise CloudError(response) + return json.loads(response.text) + + +def main(): + AzureRMIoTDeviceFacts() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevicemodule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevicemodule.py new file mode 100644 index 000000000..d23435130 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iotdevicemodule.py @@ -0,0 +1,369 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_iotdevicemodule +version_added: "0.1.2" +short_description: Manage Azure IoT hub device module +description: + - Create, delete an Azure IoT hub device module. +options: + hub: + description: + - Name of IoT Hub. + type: str + required: true + hub_policy_name: + description: + - Policy name of the IoT Hub which will be used to query from IoT hub. + - This policy should have at least 'Registry Read' access. + type: str + required: true + hub_policy_key: + description: + - Key of the I(hub_policy_name). 
+ type: str + required: true + name: + description: + - Name of the IoT hub device identity. + type: str + required: true + device: + description: + - Device name the module associate with. + required: true + type: str + state: + description: + - State of the IoT hub. Use C(present) to create or update an IoT hub device and C(absent) to delete an IoT hub device. + type: str + default: present + choices: + - absent + - present + auth_method: + description: + - The authorization type an entity is to be created with. + type: str + choices: + - sas + - certificate_authority + - self_signed + default: sas + primary_key: + description: + - Explicit self-signed certificate thumbprint to use for primary key. + - Explicit Shared Private Key to use for primary key. + type: str + aliases: + - primary_thumbprint + secondary_key: + description: + - Explicit self-signed certificate thumbprint to use for secondary key. + - Explicit Shared Private Key to use for secondary key. + type: str + aliases: + - secondary_thumbprint + twin_tags: + description: + - A section that the solution back end can read from and write to. + - Tags are not visible to device apps. + - "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key." + - List is not supported. + type: dict + desired: + description: + - Used along with reported properties to synchronize device configuration or conditions. + - "The tag can be nested dictionary, '.', '$', '#', ' ' is not allowed in the key." + - List is not supported. 
+ type: dict +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create simplest Azure IoT Hub device module + azure_rm_iotdevicemodule: + hub: myHub + name: Testing + device: mydevice + hub_policy_name: iothubowner + hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + +- name: Create Azure IoT Edge device module + azure_rm_iotdevice: + hub: myHub + device: mydevice + name: Testing + hub_policy_name: iothubowner + hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + edge_enabled: yes + +- name: Create Azure IoT Hub device module with module twin properties and tag + azure_rm_iotdevice: + hub: myHub + name: Testing + device: mydevice + hub_policy_name: iothubowner + hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + twin_tags: + location: + country: US + city: Redmond + sensor: humidity + desired: + period: 100 +''' + +RETURN = ''' +module: + description: + - IoT Hub device. 
+ returned: always + type: dict + sample: { + "authentication": { + "symmetricKey": { + "primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + }, + "type": "sas", + "x509Thumbprint": { + "primaryThumbprint": null, + "secondaryThumbprint": null + } + }, + "cloudToDeviceMessageCount": 0, + "connectionState": "Disconnected", + "connectionStateUpdatedTime": "0001-01-01T00:00:00", + "deviceId": "mydevice", + "etag": "ODM2NjI3ODg=", + "generationId": "636904759703045768", + "lastActivityTime": "0001-01-01T00:00:00", + "managedBy": null, + "moduleId": "Testing" + } +''' # NOQA + +import json +import copy +import re + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from msrestazure.tools import parse_resource_id + from msrestazure.azure_exceptions import CloudError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMIoTDeviceModule(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + hub_policy_name=dict(type='str', required=True), + hub_policy_key=dict(type='str', no_log=True, required=True), + hub=dict(type='str', required=True), + device=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + twin_tags=dict(type='dict'), + desired=dict(type='dict'), + auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'), + primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']), + secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint']) + ) + + self.results = dict( + changed=False, + id=None + ) + + self.name = None + self.hub = None + self.device = None + self.hub_policy_key = None + self.hub_policy_name = None + self.state = 
None + self.twin_tags = None + self.desired = None + self.auth_method = None + self.primary_key = None + self.secondary_key = None + + self._base_url = None + self._mgmt_client = None + self.query_parameters = { + 'api-version': '2018-06-30' + } + self.header_parameters = { + 'Content-Type': 'application/json; charset=utf-8', + 'accept-language': 'en-US' + } + super(AzureRMIoTDeviceModule, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys(): + setattr(self, key, kwargs[key]) + + self._base_url = '{0}.azure-devices.net'.format(self.hub) + config = { + 'base_url': self._base_url, + 'key': self.hub_policy_key, + 'policy': self.hub_policy_name + } + self._mgmt_client = self.get_data_svc_client(**config) + + changed = False + + module = self.get_module() + if self.state == 'present': + if not module: + changed = True + auth = {'type': _snake_to_camel(self.auth_method)} + if self.auth_method == 'self_signed': + auth['x509Thumbprint'] = { + 'primaryThumbprint': self.primary_key, + 'secondaryThumbprint': self.secondary_key + } + elif self.auth_method == 'sas': + auth['symmetricKey'] = { + 'primaryKey': self.primary_key, + 'secondaryKey': self.secondary_key + } + module = { + 'deviceId': self.device, + 'moduleId': self.name, + 'authentication': auth + } + if changed and not self.check_mode: + module = self.create_or_update_module(module) + twin = self.get_twin() + if not twin.get('tags'): + twin['tags'] = dict() + twin_change = False + if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']): + twin_change = True + if self.desired and not self.is_equal(self.desired, twin['properties']['desired']): + self.module.warn('desired') + twin_change = True + if twin_change and not self.check_mode: + twin = self.update_twin(twin) + changed = changed or twin_change + module['tags'] = twin.get('tags') or dict() + module['properties'] = twin['properties'] + elif module: + if not 
self.check_mode: + self.delete_module(module['etag']) + changed = True + module = None + self.results = module or dict() + self.results['changed'] = changed + return self.results + + def is_equal(self, updated, original): + changed = False + if not isinstance(updated, dict): + self.fail('The Property or Tag should be a dict') + for key in updated.keys(): + if re.search(r'[.|$|#|\s]', key): + self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. Got '{0}'".format(key)) + original_value = original.get(key) + updated_value = updated[key] + if isinstance(updated_value, dict): + if not isinstance(original_value, dict): + changed = True + original[key] = updated_value + elif not self.is_equal(updated_value, original_value): + changed = True + elif original_value != updated_value: + changed = True + original[key] = updated_value + return not changed + + def create_or_update_module(self, module): + try: + url = '/devices/{0}/modules/{1}'.format(self.device, self.name) + headers = copy.copy(self.header_parameters) + if module.get('etag'): + headers['If-Match'] = '"{0}"'.format(module['etag']) + request = self._mgmt_client.put(url, self.query_parameters) + response = self._mgmt_client.send(request=request, headers=headers, content=module) + if response.status_code not in [200, 201]: + raise CloudError(response) + return json.loads(response.text) + except Exception as exc: + self.fail('Error when creating or updating IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def delete_module(self, etag): + try: + url = '/devices/{0}/modules/{1}'.format(self.device, self.name) + headers = copy.copy(self.header_parameters) + headers['If-Match'] = '"{0}"'.format(etag) + request = self._mgmt_client.delete(url, self.query_parameters) + response = self._mgmt_client.send(request=request, headers=headers) + if response.status_code not in [204]: + raise CloudError(response) + except Exception as exc: + self.fail('Error when deleting IoT Hub 
device {0}: {1}'.format(self.name, exc.message or str(exc))) + + def get_module(self): + try: + url = '/devices/{0}/modules/{1}'.format(self.device, self.name) + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception: + return None + + def get_twin(self): + try: + url = '/twins/{0}/modules/{1}'.format(self.device, self.name) + return self._https_get(url, self.query_parameters, self.header_parameters) + except Exception as exc: + self.fail('Error when getting IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc))) + + def update_twin(self, twin): + try: + url = '/twins/{0}/modules/{1}'.format(self.device, self.name) + headers = copy.copy(self.header_parameters) + headers['If-Match'] = twin['etag'] + request = self._mgmt_client.patch(url, self.query_parameters) + response = self._mgmt_client.send(request=request, headers=headers, content=twin) + if response.status_code not in [200]: + raise CloudError(response) + return json.loads(response.text) + except Exception as exc: + self.fail('Error when creating or updating IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc))) + + def _https_get(self, url, query_parameters, header_parameters): + request = self._mgmt_client.get(url, query_parameters) + response = self._mgmt_client.send(request=request, headers=header_parameters, content=None) + if response.status_code not in [200]: + raise CloudError(response) + return json.loads(response.text) + + +def main(): + AzureRMIoTDeviceModule() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub.py new file mode 100644 index 000000000..daf59783a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub.py @@ -0,0 +1,892 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_iothub +version_added: "0.1.2" +short_description: Manage Azure IoT hub +description: + - Create, delete an Azure IoT hub. +options: + resource_group: + description: + - Name of resource group. + type: str + required: true + name: + description: + - Name of the IoT hub. + type: str + required: true + state: + description: + - State of the IoT hub. Use C(present) to create or update an IoT hub and C(absent) to delete an IoT hub. + type: str + default: present + choices: + - absent + - present + location: + description: + - Location of the IoT hub. + type: str + sku: + description: + - Pricing tier for Azure IoT Hub. + - Note that only one free IoT hub instance is allowed in each subscription. Exception will be thrown if free instances exceed one. + - Default is C(s1) when creation. + type: str + choices: + - b1 + - b2 + - b3 + - f1 + - s1 + - s2 + - s3 + unit: + description: + - Units in your IoT Hub. + - Default is C(1). + type: int + event_endpoint: + description: + - The Event Hub-compatible endpoint property. + type: dict + suboptions: + partition_count: + description: + - The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)." + - Default is C(2). + type: int + retention_time_in_days: + description: + - The retention time for device-to-cloud messages in days. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)." + - Default is C(1). + type: int + enable_file_upload_notifications: + description: + - File upload notifications are enabled if set to C(True). 
+ type: bool + ip_filters: + description: + - Configure rules for rejecting or accepting traffic from specific IPv4 addresses. + type: list + elements: dict + suboptions: + name: + description: + - Name of the filter. + type: str + required: yes + ip_mask: + description: + - A string that contains the IP address range in CIDR notation for the rule. + type: str + required: yes + action: + description: + - The desired action for requests captured by this rule. + type: str + required: yes + choices: + - accept + - reject + routing_endpoints: + description: + - Custom endpoints. + type: list + elements: dict + suboptions: + name: + description: + - Name of the custom endpoint. + type: str + required: yes + resource_group: + description: + - Resource group of the endpoint. + - Default is the same as I(resource_group). + type: str + subscription: + description: + - Subscription id of the endpoint. + - Default is the same as I(subscription). + type: str + resource_type: + description: + - Resource type of the custom endpoint. + type: str + choices: + - eventhub + - queue + - storage + - topic + required: yes + connection_string: + description: + - Connection string of the custom endpoint. + - The connection string should have send privilege. + type: str + required: yes + container: + description: + - Container name of the custom endpoint when I(resource_type=storage). + type: str + encoding: + description: + - Encoding of the message when I(resource_type=storage). + type: str + routes: + description: + - Route device-to-cloud messages to service-facing endpoints. + type: list + elements: dict + suboptions: + name: + description: + - Name of the route. + type: str + required: yes + source: + description: + - The origin of the data stream to be acted upon. + type: str + choices: + - device_messages + - twin_change_events + - device_lifecycle_events + - device_job_lifecycle_events + required: yes + enabled: + description: + - Whether to enable the route. 
+ type: bool + required: yes + endpoint_name: + description: + - The name of the endpoint in I(routing_endpoints) where IoT Hub sends messages that match the query. + type: str + required: yes + condition: + description: + - "The query expression for the routing query that is run against the message application properties, + system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint." + - "For more information about constructing a query, + see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)" + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a simplest IoT hub + azure_rm_iothub: + name: Testing + resource_group: myResourceGroup +- name: Create an IoT hub with route + azure_rm_iothub: + resource_group: myResourceGroup + name: Testing + routing_endpoints: + - connection_string: "Endpoint=sb://qux.servicebus.windows.net/;SharedAccessKeyName=quux;SharedAccessKey=****;EntityPath=myQueue" + name: foo + resource_type: queue + resource_group: myResourceGroup1 + routes: + - name: bar + source: device_messages + endpoint_name: foo + enabled: yes +''' + +RETURN = ''' +id: + description: + - Resource ID of the IoT hub. + sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup/providers/Microsoft.Devices/IotHubs/Testing" + returned: success + type: str +name: + description: + - Name of the IoT hub. + sample: Testing + returned: success + type: str +resource_group: + description: + - Resource group of the IoT hub. + sample: myResourceGroup. + returned: success + type: str +location: + description: + - Location of the IoT hub. + sample: eastus + returned: success + type: str +unit: + description: + - Units in the IoT Hub. 
+ sample: 1 + returned: success + type: int +sku: + description: + - Pricing tier for Azure IoT Hub. + sample: f1 + returned: success + type: str +cloud_to_device: + description: + - Cloud to device message properties. + contains: + max_delivery_count: + description: + - The number of times the IoT hub attempts to deliver a message on the feedback queue. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)." + type: int + returned: success + sample: 10 + ttl_as_iso8601: + description: + - The period of time for which a message is available to consume before it is expired by the IoT hub. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)." + type: str + returned: success + sample: "1:00:00" + returned: success + type: complex +enable_file_upload_notifications: + description: + - Whether file upload notifications are enabled. + sample: True + returned: success + type: bool +event_endpoints: + description: + - Built-in endpoint where to deliver device message. + contains: + endpoint: + description: + - The Event Hub-compatible endpoint. + type: str + returned: success + sample: "sb://iothub-ns-testing-1478811-9bbc4a15f0.servicebus.windows.net/" + partition_count: + description: + - The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)." + type: int + returned: success + sample: 2 + retention_time_in_days: + description: + - The retention time for device-to-cloud messages in days. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)." + type: int + returned: success + sample: 1 + partition_ids: + description: + - List of the partition id for the event endpoint. 
+ type: list + returned: success + sample: ["0", "1"] + returned: success + type: complex +host_name: + description: + - Host of the IoT hub. + sample: "testing.azure-devices.net" + returned: success + type: str +ip_filters: + description: + - Configure rules for rejecting or accepting traffic from specific IPv4 addresses. + contains: + name: + description: + - Name of the filter. + type: str + returned: success + sample: filter + ip_mask: + description: + - A string that contains the IP address range in CIDR notation for the rule. + type: str + returned: success + sample: 40.54.7.3 + action: + description: + - The desired action for requests captured by this rule. + type: str + returned: success + sample: Reject + returned: success + type: complex +routing_endpoints: + description: + - Custom endpoints. + contains: + event_hubs: + description: + - List of custom endpoints of event hubs. + type: complex + returned: success + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: success + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: success + sample: bar + subscription: + description: + - Subscription id of the endpoint. + type: str + returned: success + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: success + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + service_bus_queues: + description: + - List of custom endpoints of service bus queue. + type: complex + returned: always + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: success + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: success + sample: bar + subscription: + description: + - Subscription ID of the endpoint. 
+ type: str + returned: success + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: success + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + service_bus_topics: + description: + - List of custom endpoints of service bus topic. + type: complex + returned: success + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: success + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: success + sample: bar + subscription: + description: + - Subscription ID of the endpoint. + type: str + returned: success + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: success + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + storage_containers: + description: + - List of custom endpoints of storage + type: complex + returned: success + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: success + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: success + sample: bar + subscription: + description: + - Subscription ID of the endpoint. + type: str + returned: success + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: success + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + returned: success + type: complex +routes: + description: + - Route device-to-cloud messages to service-facing endpoints. + type: complex + returned: success + contains: + name: + description: + - Name of the route. 
+ type: str + returned: success + sample: route1 + source: + description: + - The origin of the data stream to be acted upon. + type: str + returned: success + sample: device_messages + enabled: + description: + - Whether to enable the route. + type: str + returned: success + sample: true + endpoint_name: + description: + - The name of the endpoint in C(routing_endpoints) where IoT Hub sends messages that match the query. + type: str + returned: success + sample: foo + condition: + description: + - "The query expression for the routing query that is run against the message application properties, + system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint." + - "For more information about constructing a query, + see I(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)" + type: bool + returned: success + sample: "true" +''' # NOQA + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +import re + +try: + from msrestazure.tools import parse_resource_id +except ImportError: + # This is handled in azure_rm_common + pass + + +ip_filter_spec = dict( + name=dict(type='str', required=True), + ip_mask=dict(type='str', required=True), + action=dict(type='str', required=True, choices=['accept', 'reject']) +) + + +routing_endpoints_spec = dict( + connection_string=dict(type='str', required=True), + name=dict(type='str', required=True), + resource_group=dict(type='str'), + subscription=dict(type='str'), + resource_type=dict(type='str', required=True, choices=['eventhub', 'queue', 'storage', 'topic']), + container=dict(type='str'), + encoding=dict(type='str') +) + + +routing_endpoints_resource_type_mapping = { + 'eventhub': {'model': 'RoutingEventHubProperties', 'attribute': 'event_hubs'}, + 'queue': {'model': 
'RoutingServiceBusQueueEndpointProperties', 'attribute': 'service_bus_queues'}, + 'topic': {'model': 'RoutingServiceBusTopicEndpointProperties', 'attribute': 'service_bus_topics'}, + 'storage': {'model': 'RoutingStorageContainerProperties', 'attribute': 'storage_containers'} +} + + +routes_spec = dict( + name=dict(type='str', required=True), + source=dict(type='str', required=True, choices=['device_messages', 'twin_change_events', 'device_lifecycle_events', 'device_job_lifecycle_events']), + enabled=dict(type='bool', required=True), + endpoint_name=dict(type='str', required=True), + condition=dict(type='str') +) + + +event_endpoint_spec = dict( + partition_count=dict(type='int'), + retention_time_in_days=dict(type='int') +) + + +class AzureRMIoTHub(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + sku=dict(type='str', choices=['b1', 'b2', 'b3', 'f1', 's1', 's2', 's3']), + unit=dict(type='int'), + event_endpoint=dict(type='dict', options=event_endpoint_spec), + enable_file_upload_notifications=dict(type='bool'), + ip_filters=dict(type='list', elements='dict', options=ip_filter_spec), + routing_endpoints=dict(type='list', elements='dict', options=routing_endpoints_spec), + routes=dict(type='list', elements='dict', options=routes_spec) + ) + + self.results = dict( + changed=False, + id=None + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.sku = None + self.unit = None + self.event_endpoint = None + self.tags = None + self.enable_file_upload_notifications = None + self.ip_filters = None + self.routing_endpoints = None + self.routes = None + + super(AzureRMIoTHub, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in 
list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + + if not self.location: + # Set default location + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + self.sku = str.capitalize(self.sku) if self.sku else None + iothub = self.get_hub() + if self.state == 'present': + if not iothub: + changed = True + self.sku = self.sku or 'S1' + self.unit = self.unit or 1 + self.event_endpoint = self.event_endpoint or {} + self.event_endpoint['partition_count'] = self.event_endpoint.get('partition_count') or 2 + self.event_endpoint['retention_time_in_days'] = self.event_endpoint.get('retention_time_in_days') or 1 + event_hub_properties = dict() + event_hub_properties['events'] = self.IoThub_models.EventHubProperties(**self.event_endpoint) + iothub_property = self.IoThub_models.IotHubProperties(event_hub_endpoints=event_hub_properties) + if self.enable_file_upload_notifications: + iothub_property.enable_file_upload_notifications = self.enable_file_upload_notifications + if self.ip_filters: + iothub_property.ip_filter_rules = self.construct_ip_filters() + routing_endpoints = None + routes = None + if self.routing_endpoints: + routing_endpoints = self.construct_routing_endpoint(self.routing_endpoints) + if self.routes: + routes = [self.construct_route(x) for x in self.routes] + if routes or routing_endpoints: + routing_property = self.IoThub_models.RoutingProperties(endpoints=routing_endpoints, + routes=routes) + iothub_property.routing = routing_property + iothub = self.IoThub_models.IotHubDescription(location=self.location, + sku=self.IoThub_models.IotHubSkuInfo(name=self.sku, capacity=self.unit), + properties=iothub_property, + tags=self.tags) + if not self.check_mode: + iothub = self.create_or_update_hub(iothub) + else: + # compare sku + original_sku = iothub.sku + if self.sku and self.sku != original_sku.name: + self.log('SKU changed') + iothub.sku.name = self.sku + changed 
= True + if self.unit and self.unit != original_sku.capacity: + self.log('Unit count changed') + iothub.sku.capacity = self.unit + changed = True + # compare event hub property + event_hub = iothub.properties.event_hub_endpoints or dict() + if self.event_endpoint: + item = self.event_endpoint + original_item = event_hub.get('events') + if not original_item: + changed = True + event_hub['events'] = self.IoThub_models.EventHubProperties(partition_count=item.get('partition_count') or 2, + retention_time_in_days=item.get('retention_time_in_days') or 1) + elif item.get('partition_count') and original_item.partition_count != item['partition_count']: + changed = True + original_item.partition_count = item['partition_count'] + elif item.get('retention_time_in_days') and original_item.retention_time_in_days != item['retention_time_in_days']: + changed = True + original_item.retention_time_in_days = item['retention_time_in_days'] + # compare endpoint + original_endpoints = iothub.properties.routing.endpoints + endpoint_changed = False + if self.routing_endpoints: + # find the total length + total_length = 0 + for item in routing_endpoints_resource_type_mapping.values(): + attribute = item['attribute'] + array = getattr(original_endpoints, attribute) + total_length += len(array or []) + if total_length != len(self.routing_endpoints): + endpoint_changed = True + else: # If already changed, no need to compare any more + for item in self.routing_endpoints: + if not self.lookup_endpoint(item, original_endpoints): + endpoint_changed = True + break + if endpoint_changed: + iothub.properties.routing.endpoints = self.construct_routing_endpoint(self.routing_endpoints) + changed = True + # compare routes + original_routes = iothub.properties.routing.routes + routes_changed = False + if self.routes: + if len(self.routes) != len(original_routes or []): + routes_changed = True + else: + for item in self.routes: + if not self.lookup_route(item, original_routes): + routes_changed = True + 
break + if routes_changed: + changed = True + iothub.properties.routing.routes = [self.construct_route(x) for x in self.routes] + # compare IP filter + ip_filter_changed = False + original_ip_filter = iothub.properties.ip_filter_rules + if self.ip_filters: + if len(self.ip_filters) != len(original_ip_filter or []): + ip_filter_changed = True + else: + for item in self.ip_filters: + if not self.lookup_ip_filter(item, original_ip_filter): + ip_filter_changed = True + break + if ip_filter_changed: + changed = True + iothub.properties.ip_filter_rules = self.construct_ip_filters() + + # compare tags + tag_changed, updated_tags = self.update_tags(iothub.tags) + iothub.tags = updated_tags + if changed and not self.check_mode: + iothub = self.create_or_update_hub(iothub) + # only tags changed + if not changed and tag_changed: + changed = True + if not self.check_mode: + iothub = self.update_instance_tags(updated_tags) + self.results = self.to_dict(iothub) + elif iothub: + changed = True + if not self.check_mode: + self.delete_hub() + self.results['changed'] = changed + return self.results + + def lookup_ip_filter(self, target, ip_filters): + if not ip_filters or len(ip_filters) == 0: + return False + for item in ip_filters: + if item.filter_name == target['name']: + if item.ip_mask != target['ip_mask']: + return False + if item.action.lower() != target['action']: + return False + return True + return False + + def lookup_route(self, target, routes): + if not routes or len(routes) == 0: + return False + for item in routes: + if item.name == target['name']: + if target['source'] != _camel_to_snake(item.source): + return False + if target['enabled'] != item.is_enabled: + return False + if target['endpoint_name'] != item.endpoint_names[0]: + return False + if target.get('condition') and target['condition'] != item.condition: + return False + return True + return False + + def lookup_endpoint(self, target, routing_endpoints): + resource_type = target['resource_type'] + 
attribute = routing_endpoints_resource_type_mapping[resource_type]['attribute'] + endpoints = getattr(routing_endpoints, attribute) + if not endpoints or len(endpoints) == 0: + return False + for item in endpoints: + if item.name == target['name']: + if target.get('resource_group') and target['resource_group'] != (item.resource_group or self.resource_group): + return False + if target.get('subscription_id') and target['subscription_id'] != (item.subscription_id or self.subscription_id): + return False + connection_string_regex = item.connection_string.replace('****', '.*') + connection_string_regex = re.sub(r':\d+/;', '/;', connection_string_regex) + if not re.search(connection_string_regex, target['connection_string']): + return False + if resource_type == 'storage': + if target.get('container') and item.container_name != target['container']: + return False + if target.get('encoding') and item.encoding != target['encoding']: + return False + return True + return False + + def construct_ip_filters(self): + return [self.IoThub_models.IpFilterRule(filter_name=x['name'], + action=self.IoThub_models.IpFilterActionType[x['action']], + ip_mask=x['ip_mask']) for x in self.ip_filters] + + def construct_routing_endpoint(self, routing_endpoints): + if not routing_endpoints or len(routing_endpoints) == 0: + return None + result = self.IoThub_models.RoutingEndpoints() + for endpoint in routing_endpoints: + resource_type_property = routing_endpoints_resource_type_mapping.get(endpoint['resource_type']) + resource_type = getattr(self.IoThub_models, resource_type_property['model']) + array = getattr(result, resource_type_property['attribute']) or [] + array.append(resource_type(**endpoint)) + setattr(result, resource_type_property['attribute'], array) + return result + + def construct_route(self, route): + if not route: + return None + return self.IoThub_models.RouteProperties(name=route['name'], + source=_snake_to_camel(snake=route['source'], capitalize_first=True), + 
is_enabled=route['enabled'], + endpoint_names=[route['endpoint_name']], + condition=route.get('condition')) + + def get_hub(self): + try: + return self.IoThub_client.iot_hub_resource.get(self.resource_group, self.name) + except Exception: + return None + + def create_or_update_hub(self, hub): + try: + poller = self.IoThub_client.iot_hub_resource.begin_create_or_update(self.resource_group, self.name, hub, if_match=hub.etag) + return self.get_poller_result(poller) + except Exception as exc: + self.fail('Error creating or updating IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc))) + + def update_instance_tags(self, tags): + try: + poller = self.IoThub_client.iot_hub_resource.begin_update(self.resource_group, self.name, tags=tags) + return self.get_poller_result(poller) + except Exception as exc: + self.fail('Error updating IoT Hub {0}\'s tag: {1}'.format(self.name, exc.message or str(exc))) + + def delete_hub(self): + try: + self.IoThub_client.iot_hub_resource.begin_delete(self.resource_group, self.name) + return True + except Exception as exc: + self.fail('Error deleting IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc))) + return False + + def route_to_dict(self, route): + return dict( + name=route.name, + source=_camel_to_snake(route.source), + endpoint_name=route.endpoint_names[0], + enabled=route.is_enabled, + condition=route.condition + ) + + def instance_dict_to_dict(self, instance_dict): + result = dict() + if not instance_dict: + return result + for key in instance_dict.keys(): + result[key] = instance_dict[key].as_dict() + return result + + def to_dict(self, hub): + result = dict() + properties = hub.properties + result['id'] = hub.id + result['name'] = hub.name + result['resource_group'] = self.resource_group + result['location'] = hub.location + result['tags'] = hub.tags + result['unit'] = hub.sku.capacity + result['sku'] = hub.sku.name.lower() + result['cloud_to_device'] = dict( + 
max_delivery_count=properties.cloud_to_device.feedback.max_delivery_count, + ttl_as_iso8601=str(properties.cloud_to_device.feedback.ttl_as_iso8601) + ) if properties.cloud_to_device else dict() + result['enable_file_upload_notifications'] = properties.enable_file_upload_notifications + result['event_endpoint'] = properties.event_hub_endpoints.get('events').as_dict() if properties.event_hub_endpoints.get('events') else None + result['host_name'] = properties.host_name + result['ip_filters'] = [x.as_dict() for x in properties.ip_filter_rules] + if properties.routing: + result['routing_endpoints'] = properties.routing.endpoints.as_dict() + result['routes'] = [self.route_to_dict(x) for x in properties.routing.routes] + result['fallback_route'] = self.route_to_dict(properties.routing.fallback_route) + result['status'] = properties.state + result['storage_endpoints'] = self.instance_dict_to_dict(properties.storage_endpoints) + return result + + +def main(): + AzureRMIoTHub() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub_info.py new file mode 100644 index 000000000..22b2335f4 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothub_info.py @@ -0,0 +1,614 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_iothub_info + +version_added: "0.1.2" + +short_description: Get IoT Hub facts + +description: + - Get facts for a specific IoT Hub or all IoT Hubs. + +options: + name: + description: + - Limit results to a specific resource group. + type: str + resource_group: + description: + - The resource group to search for the desired IoT Hub. 
+ type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + show_stats: + description: + - Show the statistics for IoT Hub. + - Note this will have network overhead for each IoT Hub. + type: bool + show_quota_metrics: + description: + - Get the quota metrics for an IoT hub. + - Note this will have network overhead for each IoT Hub. + type: bool + show_endpoint_health: + description: + - Get the health for routing endpoints. + - Note this will have network overhead for each IoT Hub. + type: bool + test_route_message: + description: + - Test routes message. It will be used to test all routes. + type: str + list_consumer_groups: + description: + - List the consumer group of the built-in event hub. + type: bool + list_keys: + description: + - List the keys of IoT Hub. + - Note this will have network overhead for each IoT Hub. + type: bool +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' + - name: Get facts for one IoT Hub + azure_rm_iothub_info: + name: Testing + resource_group: myResourceGroup + + - name: Get facts for all IoT Hubs + azure_rm_iothub_info: + + - name: Get facts for all IoT Hubs in a specific resource group + azure_rm_iothub_info: + resource_group: myResourceGroup + + - name: Get facts by tags + azure_rm_iothub_info: + tags: + - testing +''' + +RETURN = ''' +azure_iothubs: + description: + - List of IoT Hub dicts. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the IoT hub. + type: str + returned: always + sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup/providers/Microsoft.Devices/IotHubs/Testing" + name: + description: + - Name of the IoT hub. + type: str + returned: always + sample: Testing + resource_group: + description: + - Resource group of the IoT hub. 
+ type: str + returned: always + sample: myResourceGroup. + location: + description: + - Location of the IoT hub. + type: str + returned: always + sample: eastus + unit: + description: + - Units in the IoT Hub. + type: int + returned: always + sample: 1 + sku: + description: + - Pricing tier for Azure IoT Hub. + type: str + returned: always + sample: f1 + cloud_to_device: + description: + - Cloud to device message properties. + type: complex + returned: always + contains: + max_delivery_count: + description: + - The number of times the IoT hub attempts to deliver a message on the feedback queue. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)." + type: int + returned: always + sample: 10 + ttl_as_iso8601: + description: + - The period of time for which a message is available to consume before it is expired by the IoT hub. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)." + type: str + returned: always + sample: "1:00:00" + enable_file_upload_notifications: + description: + - Whether file upload notifications are enabled. + type: str + returned: always + sample: True + event_endpoints: + description: + - Built-in endpoint where to deliver device message. + type: complex + returned: always + contains: + endpoint: + description: + - The Event Hub-compatible endpoint. + type: str + returned: always + sample: "sb://iothub-ns-testing-1478811-9bbc4a15f0.servicebus.windows.net/" + partition_count: + description: + - The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint. + - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)." + type: int + returned: always + sample: 2 + retention_time_in_days: + description: + - The retention time for device-to-cloud messages in days. 
+ - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)." + type: int + returned: always + sample: 1 + partition_ids: + description: + - List of the partition id for the event endpoint. + type: list + returned: always + sample: ["0", "1"] + host_name: + description: + - Host of the IoT hub. + type: str + returned: always + sample: "testing.azure-devices.net" + ip_filters: + description: + - Configure rules for rejecting or accepting traffic from specific IPv4 addresses. + type: complex + returned: always + contains: + name: + description: + - Name of the filter. + type: str + returned: always + sample: filter + ip_mask: + description: + - A string that contains the IP address range in CIDR notation for the rule. + type: str + returned: always + sample: 40.54.7.3 + action: + description: + - The desired action for requests captured by this rule. + type: str + returned: always + sample: Reject + routing_endpoints: + description: + - Custom endpoints. + type: complex + returned: always + contains: + event_hubs: + description: + - List of custom endpoints of event hubs. + type: complex + returned: always + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: always + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: always + sample: bar + subscription: + description: + - Subscription ID of the endpoint. + type: str + returned: always + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: always + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + service_bus_queues: + description: + - List of custom endpoints of service bus queue. + type: complex + returned: always + contains: + name: + description: + - Name of the custom endpoint. 
+ type: str + returned: always + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: always + sample: bar + subscription: + description: + - Subscription ID of the endpoint. + type: str + returned: always + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: always + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + service_bus_topics: + description: + - List of custom endpoints of service bus topic. + type: complex + returned: always + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: always + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: always + sample: bar + subscription: + description: + - Subscription ID of the endpoint. + type: str + returned: always + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. + type: str + returned: always + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + storage_containers: + description: + - List of custom endpoints of storage. + type: complex + returned: always + contains: + name: + description: + - Name of the custom endpoint. + type: str + returned: always + sample: foo + resource_group: + description: + - Resource group of the endpoint. + type: str + returned: always + sample: bar + subscription: + description: + - Subscription ID of the endpoint. + type: str + returned: always + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" + connection_string: + description: + - Connection string of the custom endpoint. 
+ type: str + returned: always + sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo" + routes: + description: + - Route device-to-cloud messages to service-facing endpoints. + type: complex + returned: always + contains: + name: + description: + - Name of the route. + type: str + returned: always + sample: route1 + source: + description: + - The origin of the data stream to be acted upon. + type: str + returned: always + sample: device_messages + enabled: + description: + - Whether to enable the route. + type: bool + returned: always + sample: true + endpoint_name: + description: + - The name of the endpoint in I(routing_endpoints) where IoT Hub sends messages that match the query. + type: str + returned: always + sample: foo + condition: + description: + - "The query expression for the routing query that is run against the message application properties, + system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint." + - "For more information about constructing a query, + see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)" + type: bool + returned: always + sample: "true" + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: dict + returned: always + sample: { 'key1': 'value1' } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +try: + from msrestazure.tools import parse_resource_id +except Exception: + # handled in azure_rm_common + pass + + +class AzureRMIoTHubFacts(AzureRMModuleBase): + """Utility class to get IoT Hub facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str'), + show_stats=dict(type='bool'), + show_quota_metrics=dict(type='bool'), + show_endpoint_health=dict(type='bool'), + list_keys=dict(type='bool'), + test_route_message=dict(type='str'), + list_consumer_groups=dict(type='bool') + ) + + self.results = dict( + changed=False, + azure_iothubs=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + self.show_stats = None + self.show_quota_metrics = None + self.show_endpoint_health = None + self.list_keys = None + self.test_route_message = None + self.list_consumer_groups = None + + super(AzureRMIoTHubFacts, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + response = [] + if self.name: + response = self.get_item() + elif self.resource_group: + response = self.list_by_resource_group() + else: + response = self.list_all() + self.results['iothubs'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)] + return self.results + + def get_item(self): + """Get a single IoT Hub""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + + try: + item = self.IoThub_client.iot_hub_resource.get(self.resource_group, self.name) + return [item] + except Exception as exc: + self.fail('Error 
when getting IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc))) + + def list_all(self): + """Get all IoT Hubs""" + + self.log('List all IoT Hubs') + + try: + return self.IoThub_client.iot_hub_resource.list_by_subscription() + except Exception as exc: + self.fail('Failed to list all IoT Hubs - {0}'.format(str(exc))) + + def list_by_resource_group(self): + try: + return self.IoThub_client.iot_hub_resource.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail('Failed to list IoT Hub in resource group {0} - {1}'.format(self.resource_group, exc.message or str(exc))) + + def show_hub_stats(self, resource_group, name): + try: + return self.IoThub_client.iot_hub_resource.get_stats(resource_group, name).as_dict() + except Exception as exc: + self.fail('Failed to getting statistics for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc))) + + def show_hub_quota_metrics(self, resource_group, name): + result = [] + try: + resp = self.IoThub_client.iot_hub_resource.get_quota_metrics(resource_group, name) + while True: + result.append(resp.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Failed to getting quota metrics for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc))) + return result + + def show_hub_endpoint_health(self, resource_group, name): + result = [] + try: + resp = self.IoThub_client.iot_hub_resource.get_endpoint_health(resource_group, name) + while True: + result.append(resp.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Failed to getting health for IoT Hub {0}/{1} routing endpoint: {2}'.format(resource_group, name, str(exc))) + return result + + def test_all_routes(self, resource_group, name): + try: + return self.IoThub_client.iot_hub_resource.test_all_routes(self.test_route_message, resource_group, name).routes.as_dict() + except Exception as exc: + self.fail('Failed to getting statistics for IoT Hub {0}/{1}: 
{2}'.format(resource_group, name, str(exc))) + + def list_hub_keys(self, resource_group, name): + result = [] + try: + resp = self.IoThub_client.iot_hub_resource.list_keys(resource_group, name) + while True: + result.append(resp.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Failed to getting health for IoT Hub {0}/{1} routing endpoint: {2}'.format(resource_group, name, str(exc))) + return result + + def list_event_hub_consumer_groups(self, resource_group, name, event_hub_endpoint='events'): + result = [] + try: + resp = self.IoThub_client.iot_hub_resource.list_event_hub_consumer_groups(resource_group, name, event_hub_endpoint) + while True: + cg = resp.next() + result.append(dict( + id=cg.id, + name=cg.name + )) + except StopIteration: + pass + except Exception as exc: + self.fail('Failed to listing consumer group for IoT Hub {0}/{1} routing endpoint: {2}'.format(resource_group, name, str(exc))) + return result + + def route_to_dict(self, route): + return dict( + name=route.name, + source=_camel_to_snake(route.source), + endpoint_name=route.endpoint_names[0], + enabled=route.is_enabled, + condition=route.condition + ) + + def instance_dict_to_dict(self, instance_dict): + result = dict() + for key in instance_dict.keys(): + result[key] = instance_dict[key].as_dict() + return result + + def to_dict(self, hub): + result = dict() + properties = hub.properties + result['id'] = hub.id + result['name'] = hub.name + result['resource_group'] = parse_resource_id(hub.id).get('resource_group') + result['location'] = hub.location + result['tags'] = hub.tags + result['unit'] = hub.sku.capacity + result['sku'] = hub.sku.name.lower() + result['cloud_to_device'] = dict( + max_delivery_count=properties.cloud_to_device.feedback.max_delivery_count, + ttl_as_iso8601=str(properties.cloud_to_device.feedback.ttl_as_iso8601) + ) + result['enable_file_upload_notifications'] = properties.enable_file_upload_notifications + 
result['event_hub_endpoints'] = self.instance_dict_to_dict(properties.event_hub_endpoints) + result['host_name'] = properties.host_name + result['ip_filters'] = [x.as_dict() for x in properties.ip_filter_rules] + result['routing_endpoints'] = properties.routing.endpoints.as_dict() + result['routes'] = [self.route_to_dict(x) for x in properties.routing.routes] + result['fallback_route'] = self.route_to_dict(properties.routing.fallback_route) + result['status'] = properties.state + result['storage_endpoints'] = self.instance_dict_to_dict(properties.storage_endpoints) + + # network overhead part + if self.show_stats: + result['statistics'] = self.show_hub_stats(result['resource_group'], hub.name) + if self.show_quota_metrics: + result['quota_metrics'] = self.show_hub_quota_metrics(result['resource_group'], hub.name) + if self.show_endpoint_health: + result['endpoint_health'] = self.show_hub_endpoint_health(result['resource_group'], hub.name) + if self.list_keys: + result['keys'] = self.list_hub_keys(result['resource_group'], hub.name) + if self.test_route_message: + result['test_route_result'] = self.test_all_routes(result['resource_group'], hub.name) + if self.list_consumer_groups: + result['consumer_groups'] = self.list_event_hub_consumer_groups(result['resource_group'], hub.name) + return result + + +def main(): + """Main module execution code path""" + + AzureRMIoTHubFacts() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothubconsumergroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothubconsumergroup.py new file mode 100644 index 000000000..21dde7fbb --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_iothubconsumergroup.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, 
division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_iothubconsumergroup +version_added: "0.1.2" +short_description: Manage Azure IoT hub +description: + - Create, delete an Azure IoT hub. +options: + resource_group: + description: + - Name of resource group. + type: str + required: true + hub: + description: + - Name of the IoT hub. + type: str + required: true + state: + description: + - State of the IoT hub. Use C(present) to create or update an IoT hub and C(absent) to delete an IoT hub. + type: str + default: present + choices: + - absent + - present + event_hub: + description: + - Event hub endpoint name. + type: str + default: events + name: + description: + - Name of the consumer group. + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create an IoT hub consumer group + azure_rm_iothubconsumergroup: + name: test + resource_group: myResourceGroup + hub: Testing +''' + +RETURN = ''' +id: + description: + - Resource ID of the consumer group. + returned: success + type: str + sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup + /providers/Microsoft.Devices/IotHubs/Testing/events/ConsumerGroups/%24Default" +name: + description: + - Name of the consumer group. 
+ sample: Testing + returned: success + type: str +''' # NOQA + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +import re + +try: + from msrestazure.tools import parse_resource_id +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMIoTHubConsumerGroup(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + hub=dict(type='str', required=True), + event_hub=dict(type='str', default='events') + ) + + self.results = dict( + changed=False, + id=None + ) + + self.resource_group = None + self.name = None + self.state = None + self.hub = None + self.event_hub = None + + super(AzureRMIoTHubConsumerGroup, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys(): + setattr(self, key, kwargs[key]) + + changed = False + cg = self.get_cg() + if not cg and self.state == 'present': + changed = True + if not self.check_mode: + cg = self.create_cg() + elif cg and self.state == 'absent': + changed = True + cg = None + if not self.check_mode: + self.delete_cg() + self.results = dict( + id=cg.id, + name=cg.name + ) if cg else dict() + self.results['changed'] = changed + return self.results + + def get_cg(self): + try: + return self.IoThub_client.iot_hub_resource.get_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name) + except Exception: + pass + return None + + def create_cg(self): + try: + return self.IoThub_client.iot_hub_resource.create_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name) + except Exception as exc: + self.fail('Error when creating 
the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc))) + + def delete_cg(self): + try: + return self.IoThub_client.iot_hub_resource.delete_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name) + except Exception as exc: + self.fail('Error when deleting the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc))) + + +def main(): + AzureRMIoTHubConsumerGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup.py new file mode 100644 index 000000000..717923770 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup.py @@ -0,0 +1,313 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@techcon65) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_ipgroup + +version_added: "1.6.0" + +short_description: Create, delete and update IP group + +description: + - Creates, deletes, and updates IP group in specified resource group. + +options: + resource_group: + description: + - Name of the resource group. + required: true + type: str + name: + description: + - The name of the IP group. + required: true + type: str + location: + description: + - Location for IP group. Defaults to location of resource group if not specified. + type: str + ip_addresses: + description: + - The List of IP addresses in IP group. + type: list + elements: str + state: + description: + - Assert the state of the IP group. Use C(present) to create or update and C(absent) to delete. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@techcon65) +''' + +EXAMPLES = ''' +- name: Create IP Group + azure_rm_ipgroup: + resource_group: MyAzureResourceGroup + name: myipgroup + location: eastus + ip_addresses: + - 13.64.39.16/32 + - 40.74.146.80/31 + - 40.74.147.32/28 + tags: + key1: "value1" + state: present + +- name: Update IP Group + azure_rm_ipgroup: + resource_group: MyAzureResourceGroup + name: myipgroup + location: eastus + ip_addresses: + - 10.0.0.0/24 + tags: + key2: "value2" + +- name: Delete IP Group + azure_rm_ipgroup: + resource_group: MyAzureResourceGroup + name: myipgroup + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the IP group. + returned: always + type: complex + contains: + id: + description: + - The IP group ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/ipGroups/myipgroup" + name: + description: + - The IP group name. + returned: always + type: str + sample: 'myipgroup' + location: + description: + - The Azure Region where the resource lives. + returned: always + type: str + sample: eastus + ip_addresses: + description: + - The list of IP addresses in IP group. + returned: always + type: list + elements: str + sample: [ + "13.64.39.16/32", + "40.74.146.80/31", + "40.74.147.32/28" + ] + provisioning_state: + description: + - The provisioning state of the resource. + returned: always + type: str + sample: Succeeded + firewalls: + description: + - List of references to Firewall resources that this IpGroups is associated with. 
+ returned: always + type: list + elements: dict + sample: [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Network/azureFirewalls/azurefirewall" + } + ] + tags: + description: + - Resource tags. + returned: always + type: list + sample: [{"key1": "value1"}] + type: + description: + - The type of resource. + returned: always + type: str + sample: Microsoft.Network/IpGroups + etag: + description: + - The etag of the IP group. + returned: always + type: str + sample: c67388ea-6dab-481b-9387-bd441c0d32f8 +''' + +from ansible.module_utils.basic import _load_params +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE, \ + format_resource_id, normalize_location_name + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMIPGroup(AzureRMModuleBase): + + def __init__(self): + + _load_params() + # define user inputs from playbook + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + ip_addresses=dict(type='list', elements='str'), + state=dict(choices=['present', 'absent'], default='present', type='str') + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.ip_addresses = None + self.tags = None + + super(AzureRMIPGroup, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + results = dict() + ip_group_old = None + ip_group_new = None + + # retrieve resource group to make sure it exists + resource_group = 
self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + self.location = normalize_location_name(self.location) + + try: + self.log('Fetching IP group {0}'.format(self.name)) + ip_group_old = self.network_client.ip_groups.get(self.resource_group, self.name) + # serialize object into a dictionary + results = self.ipgroup_to_dict(ip_group_old) + if self.state == 'present': + changed = False + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + self.tags = results['tags'] + update_ip_address = self.ip_addresses_changed(self.ip_addresses, results['ip_addresses']) + if update_ip_address: + changed = True + results['ip_addresses'] = self.ip_addresses + elif self.state == 'absent': + changed = True + + except ResourceNotFoundError: + if self.state == 'present': + changed = True + else: + changed = False + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + # create or update ip group + ip_group_new = \ + self.network_models.IpGroup(location=self.location, + ip_addresses=self.ip_addresses) + if self.tags: + ip_group_new.tags = self.tags + self.results['state'] = self.create_or_update_ipgroup(ip_group_new) + + elif self.state == 'absent': + # delete ip group + self.delete_ipgroup() + self.results['state'] = 'Deleted' + + return self.results + + def create_or_update_ipgroup(self, ip_group): + try: + # create ip group + response = self.network_client.ip_groups.begin_create_or_update(resource_group_name=self.resource_group, + ip_groups_name=self.name, + parameters=ip_group) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error creating or updating IP group {0} - {1}".format(self.name, str(exc))) + return self.ipgroup_to_dict(response) + + def 
delete_ipgroup(self): + try: + # delete ip group + response = self.network_client.ip_groups.begin_delete(resource_group_name=self.resource_group, + ip_groups_name=self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.fail("Error deleting IP group {0} - {1}".format(self.name, str(exc))) + return response + + def ip_addresses_changed(self, input_records, ip_group_records): + # comparing IP addresses list + + input_set = set(input_records) + ip_group_set = set(ip_group_records) + + changed = input_set != ip_group_set + + return changed + + def ipgroup_to_dict(self, ipgroup): + result = ipgroup.as_dict() + result['tags'] = ipgroup.tags + return result + + +def main(): + AzureRMIPGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup_info.py new file mode 100644 index 000000000..3ed6423a6 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ipgroup_info.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@techcon65) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_ipgroup_info + +version_added: "1.6.0" + +short_description: Get IP group facts + +description: + - Get facts for specified IP group or all IP groups in a given resource group. + +options: + resource_group: + description: + - Name of the resource group. + type: str + name: + description: + - Name of the IP group. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Aparna Patil (@techcon65) + +''' + +EXAMPLES = ''' +- name: Get facts for one IP group + azure_rm_ipgroup_info: + resource_group: myAzureResourceGroup + name: myipgroup + +- name: Get facts for all IP groups in resource group + azure_rm_ipgroup_info: + resource_group: myAzureResourceGroup +''' + +RETURN = ''' +ipgroups: + description: + - Gets a list of IP groups. + returned: always + type: list + elements: dict + sample: [ + { + "etag": "c67388ea-6dab-481b-9387-bd441c0d32f8", + "firewalls": [], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/MyAzureResourceGroup/providers/ + Microsoft.Network/ipGroups/myipgroup", + "ip_addresses": [ + "13.64.39.16/32", + "40.74.146.80/31", + "40.74.147.32/28" + ], + "location": "eastus", + "name": "myipgroup", + "provisioning_state": "Succeeded", + "tags": { + "key1": "value1" + } + } + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except Exception: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'IpGroup' + + +class AzureRMIPGroupInfo(AzureRMModuleBase): + + def __init__(self): + + # define user inputs into argument + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + # store the results of the module operation + self.results = dict( + changed=False + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMIPGroupInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + results = [] + # list 
the conditions and results to return based on user input + if self.name is not None: + # if there is IP group name provided, return facts about that specific IP group + results = self.get_item() + elif self.resource_group: + # all the IP groups listed in specific resource group + results = self.list_resource_group() + else: + # all the IP groups in a subscription + results = self.list_items() + + self.results['ipgroups'] = self.curated_items(results) + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + results = [] + # get specific IP group + try: + item = self.network_client.ip_groups.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + # serialize result + if item and self.has_tags(item.tags, self.tags): + results = [item] + return results + + def list_resource_group(self): + self.log('List all IP groups for resource group - {0}'.format(self.resource_group)) + try: + response = self.network_client.ip_groups.list_by_resource_group(self.resource_group) + except ResourceNotFoundError as exc: + self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def list_items(self): + self.log('List all IP groups for a subscription ') + try: + response = self.network_client.ip_groups.list() + except ResourceNotFoundError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def curated_items(self, raws): + return [self.ipgroup_to_dict(item) for item in raws] if raws else [] + + def ipgroup_to_dict(self, ipgroup): + result = dict( + id=ipgroup.id, + name=ipgroup.name, + location=ipgroup.location, + tags=ipgroup.tags, + ip_addresses=ipgroup.ip_addresses, + 
provisioning_state=ipgroup.provisioning_state, + firewalls=[dict(id=x.id) for x in ipgroup.firewalls], + etag=ipgroup.etag + ) + return result + + +def main(): + AzureRMIPGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault.py new file mode 100644 index 000000000..f117b380c --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault.py @@ -0,0 +1,541 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_keyvault +version_added: "0.1.2" +short_description: Manage Key Vault instance +description: + - Create, update and delete instance of Key Vault. + +options: + resource_group: + description: + - The name of the Resource Group to which the server belongs. + required: True + vault_name: + description: + - Name of the vault. + required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + vault_tenant: + description: + - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. + sku: + description: + - SKU details. + suboptions: + family: + description: + - SKU family name. + required: True + name: + description: + - SKU name to specify whether the key vault is a standard vault or a premium vault. + required: True + choices: + - 'standard' + - 'premium' + access_policies: + description: + - An array of 0 to 16 identities that have access to the key vault. + - All identities in the array must use the same tenant ID as the key vault's tenant ID. 
+ suboptions: + tenant_id: + description: + - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault. + - Current keyvault C(tenant_id) value will be used if not specified. + object_id: + description: + - The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault. + - The object ID must be unique for the list of access policies. + - Please note this is not application id. Object id can be obtained by running "az ad sp show --id ". + required: True + application_id: + description: + - Application ID of the client making request on behalf of a principal. + keys: + description: + - List of permissions to keys. + choices: + - 'encrypt' + - 'decrypt' + - 'wrapkey' + - 'unwrapkey' + - 'sign' + - 'verify' + - 'get' + - 'list' + - 'create' + - 'update' + - 'import' + - 'delete' + - 'backup' + - 'restore' + - 'recover' + - 'purge' + secrets: + description: + - List of permissions to secrets. + choices: + - 'get' + - 'list' + - 'set' + - 'delete' + - 'backup' + - 'restore' + - 'recover' + - 'purge' + certificates: + description: + - List of permissions to certificates. + choices: + - 'get' + - 'list' + - 'delete' + - 'create' + - 'import' + - 'update' + - 'managecontacts' + - 'getissuers' + - 'listissuers' + - 'setissuers' + - 'deleteissuers' + - 'manageissuers' + - 'recover' + - 'purge' + storage: + description: + - List of permissions to storage accounts. + enabled_for_deployment: + description: + - Property to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. + type: bool + enabled_for_disk_encryption: + description: + - Property to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. + type: bool + enabled_for_template_deployment: + description: + - Property to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault. 
+ type: bool + enable_soft_delete: + description: + - Property to specify whether the soft delete functionality is enabled for this key vault. + type: bool + default: True + enable_purge_protection: + description: + - Property specifying whether protection against purge is enabled for this vault. + type: bool + default: False + soft_delete_retention_in_days: + description: + - Property specifying the number of days to retain deleted vaults. + type: int + recover_mode: + description: + - Create vault in recovery mode. + type: bool + state: + description: + - Assert the state of the KeyVault. Use C(present) to create or update an KeyVault and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create instance of Key Vault + azure_rm_keyvault: + resource_group: myResourceGroup + vault_name: samplekeyvault + enabled_for_deployment: yes + vault_tenant: 72f98888-8666-4144-9199-2d7cd0111111 + sku: + name: standard + family: A + access_policies: + - tenant_id: 72f98888-8666-4144-9199-2d7cd0111111 + object_id: 99998888-8666-4144-9199-2d7cd0111111 + keys: + - get + - list +''' + +RETURN = ''' +id: + description: + - The Azure Resource Manager resource ID for the key vault. 
+ returned: always + type: str + sample: id +''' + +import collections +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.polling import LROPoller + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.keyvault import KeyVaultManagementClient + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMVaults(AzureRMModuleBase): + """Configuration class for an Azure RM Key Vault resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + vault_name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + vault_tenant=dict( + type='str' + ), + sku=dict( + type='dict' + ), + access_policies=dict( + type='list', + elements='dict', + options=dict( + tenant_id=dict(type='str'), + object_id=dict(type='str', required=True), + application_id=dict(type='str'), + # FUTURE: add `choices` support once choices supports lists of values + keys=dict(type='list', no_log=True), + secrets=dict(type='list', no_log=True), + certificates=dict(type='list'), + storage=dict(type='list') + ) + ), + enabled_for_deployment=dict( + type='bool' + ), + enabled_for_disk_encryption=dict( + type='bool' + ), + enabled_for_template_deployment=dict( + type='bool' + ), + enable_soft_delete=dict( + type='bool', + default=True + ), + soft_delete_retention_in_days=dict( + type='int' + ), + enable_purge_protection=dict( + type='bool', + default=False + ), + recover_mode=dict( + type='bool' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.module_required_if = [['state', 'present', ['vault_tenant']]] + + self.resource_group = None + self.vault_name = None + self.parameters = dict() + self.tags = None + + self.results = 
dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMVaults, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + required_if=self.module_required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + # translate Ansible input to SDK-formatted dict in self.parameters + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "location": + self.parameters["location"] = kwargs[key] + elif key == "vault_tenant": + self.parameters.setdefault("properties", {})["tenant_id"] = kwargs[key] + elif key == "sku": + self.parameters.setdefault("properties", {})["sku"] = kwargs[key] + elif key == "access_policies": + access_policies = kwargs[key] + for policy in access_policies: + if 'keys' in policy: + policy.setdefault("permissions", {})["keys"] = policy["keys"] + policy.pop("keys", None) + if 'secrets' in policy: + policy.setdefault("permissions", {})["secrets"] = policy["secrets"] + policy.pop("secrets", None) + if 'certificates' in policy: + policy.setdefault("permissions", {})["certificates"] = policy["certificates"] + policy.pop("certificates", None) + if 'storage' in policy: + policy.setdefault("permissions", {})["storage"] = policy["storage"] + policy.pop("storage", None) + if policy.get('tenant_id') is None: + # default to key vault's tenant, since that's all that's currently supported anyway + policy['tenant_id'] = kwargs['vault_tenant'] + self.parameters.setdefault("properties", {})["access_policies"] = access_policies + elif key == "enabled_for_deployment": + self.parameters.setdefault("properties", {})["enabled_for_deployment"] = kwargs[key] + elif key == "enabled_for_disk_encryption": + self.parameters.setdefault("properties", {})["enabled_for_disk_encryption"] = kwargs[key] + elif key == "enabled_for_template_deployment": 
+ self.parameters.setdefault("properties", {})["enabled_for_template_deployment"] = kwargs[key] + elif key == "enable_soft_delete": + self.parameters.setdefault("properties", {})["enable_soft_delete"] = kwargs[key] + elif key == "enable_purge_protection": + self.parameters.setdefault("properties", {})["enable_purge_protection"] = kwargs[key] + elif key == "soft_delete_retention_in_days": + self.parameters.setdefault("properties", {})["soft_delete_retention_in_days"] = kwargs[key] + elif key == "recover_mode": + self.parameters.setdefault("properties", {})["create_mode"] = 'recover' if kwargs[key] else 'default' + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(KeyVaultManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version="2021-10-01") + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_keyvault() + + if not old_response: + self.log("Key Vault instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + if not self.parameters['properties']['enable_purge_protection']: + self.parameters['properties'].pop('enable_purge_protection') + else: + self.log("Key Vault instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if Key Vault instance has to be deleted or may be updated") + if not self.parameters['properties']['enable_purge_protection'] and \ + ('enable_purge_protection' not in old_response['properties'] or + not old_response['properties']['enable_purge_protection']): + self.parameters['properties'].pop('enable_purge_protection') + if ('location' in self.parameters) and (self.parameters['location'] != old_response['location']): + self.to_do = Actions.Update + elif 
(('tenant_id' in self.parameters['properties']) and + (self.parameters['properties']['tenant_id'] != old_response['properties']['tenant_id'])): + self.to_do = Actions.Update + elif (('enabled_for_deployment' in self.parameters['properties']) and + (self.parameters['properties']['enabled_for_deployment'] != old_response['properties'].get('enabled_for_deployment', None))): + self.to_do = Actions.Update + elif (('enabled_for_disk_encryption' in self.parameters['properties']) and + (self.parameters['properties']['enabled_for_disk_encryption'] != + old_response['properties'].get('enabled_for_disk_encryption', None))): + self.to_do = Actions.Update + elif (('enabled_for_template_deployment' in self.parameters['properties']) and + (self.parameters['properties']['enabled_for_template_deployment'] != + old_response['properties'].get('enabled_for_template_deployment', None))): + self.to_do = Actions.Update + elif (('enable_soft_delete' in self.parameters['properties']) and + (self.parameters['properties']['enable_soft_delete'] != old_response['properties'].get('enable_soft_delete', None))): + self.to_do = Actions.Update + elif (('soft_delete_retention_in_days' in self.parameters['properties']) and + (self.parameters['properties']['soft_delete_retention_in_days'] != old_response['properties'].get('soft_delete_retention_in_days'))): + self.to_do = Actions.Update + elif (('enable_purge_protection' in self.parameters['properties']) and + (self.parameters['properties']['enable_purge_protection'] != old_response['properties'].get('enable_purge_protection'))): + self.to_do = Actions.Update + elif ('create_mode' in self.parameters['properties']) and (self.parameters['properties']['create_mode'] == 'recover'): + self.to_do = Actions.Update + elif 'access_policies' in self.parameters['properties']: + if len(self.parameters['properties']['access_policies']) != len(old_response['properties']['access_policies']): + self.to_do = Actions.Update + else: + # FUTURE: this list isn't really 
order-dependent- we should be set-ifying the rules list for order-independent comparison + for i in range(len(old_response['properties']['access_policies'])): + n = self.parameters['properties']['access_policies'][i] + o = old_response['properties']['access_policies'][i] + if n.get('tenant_id', False) != o.get('tenant_id', False): + self.to_do = Actions.Update + break + if n.get('object_id', None) != o.get('object_id', None): + self.to_do = Actions.Update + break + if n.get('application_id', None) != o.get('application_id', None): + self.to_do = Actions.Update + break + if sorted(n.get('permissions', {}).get('keys', []) or []) != sorted(o.get('permissions', {}).get('keys', []) or []): + self.to_do = Actions.Update + break + if sorted(n.get('permissions', {}).get('secrets', []) or []) != sorted(o.get('permissions', {}).get('secrets', []) or []): + self.to_do = Actions.Update + break + if sorted(n.get('permissions', {}).get('certificates', []) or []) != sorted(o.get('permissions', {}).get('certificates', []) or []): + self.to_do = Actions.Update + break + if sorted(n.get('permissions', {}).get('storage', []) or []) != sorted(o.get('permissions', {}).get('storage', []) or []): + self.to_do = Actions.Update + break + + update_tags, newtags = self.update_tags(old_response.get('tags', dict())) + + if update_tags: + self.to_do = Actions.Update + self.tags = newtags + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Key Vault instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + self.parameters["tags"] = self.tags + + response = self.create_update_keyvault() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Key Vault instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + 
+ self.delete_keyvault() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_keyvault(): + time.sleep(20) + else: + self.log("Key Vault instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_keyvault(self): + ''' + Creates or updates Key Vault with the specified configuration. + + :return: deserialized Key Vault instance state dictionary + ''' + self.log("Creating / Updating the Key Vault instance {0}".format(self.vault_name)) + + try: + response = self.mgmt_client.vaults.begin_create_or_update(resource_group_name=self.resource_group, + vault_name=self.vault_name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Key Vault instance.') + self.fail("Error creating the Key Vault instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_keyvault(self): + ''' + Deletes specified Key Vault instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Key Vault instance {0}".format(self.vault_name)) + try: + response = self.mgmt_client.vaults.delete(resource_group_name=self.resource_group, + vault_name=self.vault_name) + except Exception as e: + self.log('Error attempting to delete the Key Vault instance.') + self.fail("Error deleting the Key Vault instance: {0}".format(str(e))) + + return True + + def get_keyvault(self): + ''' + Gets the properties of the specified Key Vault. 
+ + :return: deserialized Key Vault instance state dictionary + ''' + self.log("Checking if the Key Vault instance {0} is present".format(self.vault_name)) + found = False + try: + response = self.mgmt_client.vaults.get(resource_group_name=self.resource_group, + vault_name=self.vault_name) + found = True + self.log("Response : {0}".format(response)) + self.log("Key Vault instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the Key Vault instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMVaults() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault_info.py new file mode 100644 index 000000000..a56a2c378 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvault_info.py @@ -0,0 +1,349 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_keyvault_info +version_added: "0.1.2" +short_description: Get Azure Key Vault facts +description: + - Get facts of Azure Key Vault. + +options: + resource_group: + description: + - The name of the resource group to which the key vault belongs. + type: str + name: + description: + - The name of the key vault. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Get Key Vault by name + azure_rm_keyvault_info: + resource_group: myResourceGroup + name: myVault + + - name: List Key Vaults in specific resource group + azure_rm_keyvault_info: + resource_group: myResourceGroup + + - name: List Key Vaults in current subscription + azure_rm_keyvault_info: +''' + +RETURN = ''' +keyvaults: + description: List of Azure Key Vaults. + returned: always + type: list + contains: + name: + description: + - Name of the vault. + returned: always + type: str + sample: myVault + id: + description: + - Resource Id of the vault. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myVault + vault_uri: + description: + - Vault uri. + returned: always + type: str + sample: https://myVault.vault.azure.net/ + location: + description: + - Location of the vault. + returned: always + type: str + sample: eastus + enabled_for_deployments: + description: + - Whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault. + returned: always + type: bool + sample: False + enabled_for_disk_encryption: + description: + - Whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys. + returned: always + type: bool + sample: False + enabled_for_template_deployment: + description: + - Whether Azure Resource Manager is permitted to retrieve secrets from the key vault. + returned: always + type: bool + sample: False + enable_soft_delete: + description: + - Whether the soft delete functionality is enabled for this key vault. 
+ type: bool + returned: always + sample: True + enable_purge_protection: + description: + - Property specifying whether protection against purge is enabled for this vault. + type: bool + returned: always + sample: False + soft_delete_retention_in_days: + description: + - Property specifying the number of days to retain deleted vaults. + type: int + returned: always + sample: 90 + tags: + description: + - List of tags. + type: list + sample: + - foo + sku: + description: + - Sku of the vault. + returned: always + type: dict + contains: + family: + description: Sku family name. + type: str + returned: always + sample: A + name: + description: Sku name. + type: str + returned: always + sample: standard + access_policies: + description: + - List of policies. + returned: always + type: list + contains: + object_id: + description: The object ID of a user, service principal or security group in AAD for the vault. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + tenant_id: + description: The AAD tenant ID that should be used for authenticating requests to the key vault. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + permissions: + description: Permissions the identity has for keys, secrets and certificates. + type: complex + returned: always + contains: + keys: + description: + Permissions to keys. + type: list + returned: always + sample: + - get + - create + secrets: + description: + Permissions to secrets. + type: list + returned: always + sample: + - list + - set + certificates: + description: + Permissions to certificates. 
+ type: list + returned: always + sample: + - get + - import +''' + + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.keyvault import KeyVaultManagementClient + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +def keyvault_to_dict(vault): + return dict( + id=vault.id, + name=vault.name, + location=vault.location, + tags=vault.tags, + vault_uri=vault.properties.vault_uri, + enabled_for_deployment=vault.properties.enabled_for_deployment, + enabled_for_disk_encryption=vault.properties.enabled_for_disk_encryption, + enabled_for_template_deployment=vault.properties.enabled_for_template_deployment, + enable_soft_delete=vault.properties.enable_soft_delete, + soft_delete_retention_in_days=vault.properties.soft_delete_retention_in_days + if vault.properties.soft_delete_retention_in_days else 90, + enable_purge_protection=vault.properties.enable_purge_protection + if vault.properties.enable_purge_protection else False, + access_policies=[dict( + tenant_id=policy.tenant_id, + object_id=policy.object_id, + permissions=dict( + keys=[kp.lower() for kp in policy.permissions.keys] if policy.permissions.keys else None, + secrets=[sp.lower() for sp in policy.permissions.secrets] if policy.permissions.secrets else None, + certificates=[cp.lower() for cp in policy.permissions.certificates] if policy.permissions.certificates else None, + storage=[stp.lower() for stp in policy.permissions.storage] if policy.permissions.storage else None + ) if policy.permissions else None, + ) for policy in vault.properties.access_policies] if vault.properties.access_policies else None, + sku=dict( + family=vault.properties.sku.family, + name=vault.properties.sku.name + ) + ) + + +class AzureRMKeyVaultInfo(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict(type='str'), + name=dict(type='str'), + 
tags=dict(type='list', elements='str') + ) + + self.resource_group = None + self.name = None + self.tags = None + + self.results = dict(changed=False) + self._client = None + + super(AzureRMKeyVaultInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + self._client = self.get_mgmt_svc_client(KeyVaultManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version="2021-10-01") + + if self.name: + if self.resource_group: + self.results['keyvaults'] = self.get_by_name() + else: + self.fail("resource_group is required when filtering by name") + elif self.resource_group: + self.results['keyvaults'] = self.list_by_resource_group() + else: + self.results['keyvaults'] = self.list() + + return self.results + + def get_by_name(self): + ''' + Gets the properties of the specified key vault. + + :return: deserialized key vaultstate dictionary + ''' + self.log("Get the key vault {0}".format(self.name)) + + results = [] + try: + response = self._client.vaults.get(resource_group_name=self.resource_group, + vault_name=self.name) + self.log("Response : {0}".format(response)) + + if response and self.has_tags(response.tags, self.tags): + results.append(keyvault_to_dict(response)) + except ResourceNotFoundError as e: + self.log("Did not find the key vault {0}: {1}".format(self.name, str(e))) + return results + + def list_by_resource_group(self): + ''' + Lists the properties of key vaults in specific resource group. 
+ + :return: deserialized key vaults state dictionary + ''' + self.log("Get the key vaults in resource group {0}".format(self.resource_group)) + + results = [] + try: + response = list(self._client.vaults.list_by_resource_group(resource_group_name=self.resource_group)) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(keyvault_to_dict(item)) + except Exception as e: + self.log("Did not find key vaults in resource group {0} : {1}.".format(self.resource_group, str(e))) + return results + + def list(self): + ''' + Lists the properties of key vaults in specific subscription. + + :return: deserialized key vaults state dictionary + ''' + self.log("Get the key vaults in current subscription") + + results = [] + try: + response = list(self._client.vaults.list()) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + source_id = item.id.split('/') + results.append(keyvault_to_dict(self._client.vaults.get(source_id[4], source_id[8]))) + except Exception as e: + self.log("Did not find key vault in current subscription {0}.".format(str(e))) + return results + + +def main(): + """Main execution""" + AzureRMKeyVaultInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultkey.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultkey.py new file mode 100644 index 000000000..985775b9a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultkey.py @@ -0,0 +1,370 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_keyvaultkey +version_added: "0.1.2" 
+short_description: Use Azure KeyVault keys +description: + - Create or delete a key within a given keyvault. + - By using Key Vault, you can encrypt keys and secrets. + - Such as authentication keys, storage account keys, data encryption keys, .PFX files, and passwords. +options: + keyvault_uri: + description: + - URI of the keyvault endpoint. + required: true + key_name: + description: + - Name of the keyvault key. + required: true + key_type: + description: + - The type of key to create. For valid values, see JsonWebKeyType. Possible values include EC, EC-HSM, RSA, RSA-HSM, oct + default: 'RSA' + key_size: + description: + - The key size in bits. For example 2048, 3072, or 4096 for RSA. + key_attributes: + description: + - The attributes of a key managed by the key vault service. + suboptions: + enabled: + description: bool + not_before: + description: + - not valid before date in UTC ISO format without the Z at the end + expires: + description: + - not valid after date in UTC ISO format without the Z at the end + curve: + description: + - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include P-256, P-384, P-521, P-256K. + byok_file: + description: + - BYOK file. + pem_file: + description: + - PEM file. + pem_password: + description: + - PEM password. + state: + description: + - Assert the state of the key. Use C(present) to create a key and C(absent) to delete a key. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Ian Philpot (@iphilpot) + +''' + +EXAMPLES = ''' + - name: Create a key + azure_rm_keyvaultkey: + key_name: MyKey + keyvault_uri: https://contoso.vault.azure.net/ + + - name: Delete a key + azure_rm_keyvaultkey: + key_name: MyKey + keyvault_uri: https://contoso.vault.azure.net/ + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the key. 
+ returned: success + type: complex + contains: + key_id: + description: + - key resource path. + type: str + example: https://contoso.vault.azure.net/keys/hello/e924f053839f4431b35bc54393f98423 +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + import re + import codecs + from azure.keyvault import KeyVaultClient, KeyVaultId, KeyVaultAuthentication + from azure.keyvault.models import KeyAttributes, JsonWebKey + from azure.common.credentials import ServicePrincipalCredentials, get_cli_profile + from datetime import datetime + from msrestazure.azure_active_directory import MSIAuthentication + from OpenSSL import crypto +except ImportError: + # This is handled in azure_rm_common + pass + +key_addribute_spec = dict( + enabled=dict(type='bool', required=False), + not_before=dict(type='str', no_log=True, required=False), + expires=dict(type='str', no_log=True, required=False) +) + + +class AzureRMKeyVaultKey(AzureRMModuleBase): + ''' Module that creates or deletes keys in Azure KeyVault ''' + + def __init__(self): + + self.module_arg_spec = dict( + key_name=dict(type='str', required=True), + keyvault_uri=dict(type='str', no_log=True, required=True), + key_type=dict(type='str', default='RSA'), + key_size=dict(type='int'), + key_attributes=dict(type='dict', no_log=True, options=key_addribute_spec), + curve=dict(type='str'), + pem_file=dict(type='str'), + pem_password=dict(type='str', no_log=True), + byok_file=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.key_name = None + self.keyvault_uri = None + self.key_type = None + self.key_size = None + self.key_attributes = None + self.curve = None + self.pem_file = None + self.pem_password = None + self.state = None + self.client = None + self.tags = None + + required_if = [ + ('pem_password', 'present', ['pem_file']) + ] + + 
super(AzureRMKeyVaultKey, self).__init__(self.module_arg_spec, + supports_check_mode=True, + required_if=required_if, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + # Create KeyVaultClient + self.client = self.get_keyvault_client() + + results = dict() + changed = False + + try: + results['key_id'] = self.get_key(self.key_name) + + # Key exists and will be deleted + if self.state == 'absent': + changed = True + + except Exception: + # Key doesn't exist + if self.state == 'present': + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + if not self.check_mode: + + # Create key + if self.state == 'present' and changed: + results['key_id'] = self.create_key(self.key_name, self.key_type, self.key_size, self.key_attributes, + self.curve, self.tags) + self.results['state'] = results + self.results['state']['status'] = 'Created' + # Delete key + elif self.state == 'absent' and changed: + results['key_id'] = self.delete_key(self.key_name) + self.results['state'] = results + self.results['state']['status'] = 'Deleted' + else: + if self.state == 'present' and changed: + self.results['state']['status'] = 'Created' + elif self.state == 'absent' and changed: + self.results['state']['status'] = 'Deleted' + + return self.results + + def get_keyvault_client(self): + kv_url = self.azure_auth._cloud_environment.suffixes.keyvault_dns.split('.', 1).pop() + # Don't use MSI credentials if the auth_source isn't set to MSI. The below will Always result in credentials when running on an Azure VM. 
+ if self.module.params['auth_source'] == 'msi': + try: + self.log("Get KeyVaultClient from MSI") + credentials = MSIAuthentication(resource="https://{0}".format(kv_url)) + return KeyVaultClient(credentials) + except Exception: + self.log("Get KeyVaultClient from service principal") + elif (self.module.params['auth_source'] == 'cli' + or (self.module.params['auth_source'] == 'auto' + and self.credentials['client_id'] is None + and self.credentials['secret'] is None)): + try: + profile = get_cli_profile() + credentials, subscription_id, tenant = profile.get_login_credentials( + subscription_id=self.credentials['subscription_id'], resource="https://{0}".format(kv_url)) + return KeyVaultClient(credentials) + except Exception as exc: + self.log("Get KeyVaultClient from service principal") + # self.fail("Failed to load CLI profile {0}.".format(str(exc))) + + # Create KeyVault Client using KeyVault auth class and auth_callback + def auth_callback(server, resource, scope): + if self.credentials['client_id'] is None or self.credentials['secret'] is None: + self.fail('Please specify client_id, secret and tenant to access azure Key Vault.') + + tenant = self.credentials.get('tenant') + if not self.credentials['tenant']: + tenant = "common" + + authcredential = ServicePrincipalCredentials( + client_id=self.credentials['client_id'], + secret=self.credentials['secret'], + tenant=tenant, + cloud_environment=self._cloud_environment, + resource="https://{0}".format(kv_url)) + + token = authcredential.token + return token['token_type'], token['access_token'] + + return KeyVaultClient(KeyVaultAuthentication(auth_callback)) + + def get_key(self, name, version=''): + ''' Gets an existing key ''' + key_bundle = self.client.get_key(self.keyvault_uri, name, version) + if key_bundle: + key_id = KeyVaultId.parse_key_id(key_bundle.key.kid) + return key_id.id + + def create_key(self, name, key_type, key_size, key_attributes, curve, tags): + ''' Creates a key ''' + + if key_attributes is not 
def delete_key(self, name):
    ''' Delete the named key from the vault and return its full key identifier. '''
    deleted_key = self.client.delete_key(self.keyvault_uri, name)
    key_id = KeyVaultId.parse_key_id(deleted_key.key.kid)
    return key_id.id

def import_key(self, key_name, destination=None, key_ops=None, disabled=False, expires=None,
               not_before=None, tags=None, pem_file=None, pem_password=None, byok_file=None):
    """ Import a private key. Supports importing base64 encoded private keys from PEM files.
        Supports importing BYOK keys into HSM for premium KeyVaults.
    """

    def _to_bytes(hex_string):
        # Zero-pad to an even number of hex digits, then decode.
        # FIX: the pad must prepend a '0' -- the previous
        # '{0}'.format(hex_string) was a no-op, so odd-length hex strings
        # made codecs.decode(..., 'hex_codec') raise.
        if len(hex_string) % 2:
            hex_string = '0{0}'.format(hex_string)
        return codecs.decode(hex_string, 'hex_codec')

    def _set_rsa_parameters(dest, src):
        # map OpenSSL parameter names to JsonWebKey property names
        conversion_dict = {
            'modulus': 'n',
            'publicExponent': 'e',
            'privateExponent': 'd',
            'prime1': 'p',
            'prime2': 'q',
            'exponent1': 'dp',
            'exponent2': 'dq',
            'coefficient': 'qi'
        }
        # regex: looks for matches that fit the following patterns:
        #   integerPattern: 65537 (0x10001)
        #   hexPattern:
        #      00:a0:91:4d:00:23:4a:c6:83:b2:1b:4c:15:d5:be:
        #      d8:87:bd:c9:59:c2:e5:7a:f5:4a:e7:34:e8:f0:07:
        # The desired match should always be the first component of the match
        regex = re.compile(r'([^:\s]*(:[^\:)]+\))|([^:\s]*(:\s*[0-9A-Fa-f]{2})+))')
        # regex2: extracts the hex string from a format like: 65537 (0x10001)
        regex2 = re.compile(r'(?<=\(0x{1})([0-9A-Fa-f]*)(?=\))')

        key_params = crypto.dump_privatekey(crypto.FILETYPE_TEXT, src).decode('utf-8')
        for match in regex.findall(key_params):
            comps = match[0].split(':', 1)
            name = conversion_dict.get(comps[0], None)
            if name:
                value = comps[1].replace(' ', '').replace('\n', '').replace(':', '')
                try:
                    value = _to_bytes(value)
                except Exception:  # pylint:disable=broad-except
                    # if decoding fails it is because of an integer pattern.
                    # Extract the hex string and retry
                    value = _to_bytes(regex2.findall(value)[0])
                setattr(dest, name, value)

    key_attrs = KeyAttributes(not disabled, not_before, expires)
    key_obj = JsonWebKey(key_ops=key_ops)
    if pem_file:
        key_obj.kty = 'RSA'
        with open(pem_file, 'r') as f:
            pem_data = f.read()
        # load private key; despite documentation saying password should be a
        # string, it needs to actually be UTF-8 encoded bytes
        # FIX: pkey was previously left unbound when load_privatekey raised
        # (both except clauses passed), causing a NameError further down.
        pkey = None
        try:
            pem_password = str(pem_password).encode() if pem_password else None
            pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, pem_data, pem_password)
        except crypto.Error:
            pass  # wrong password
        except TypeError:
            pass  # no pass provided
        if pkey is None:
            self.fail('Failed to load private key from {0}; wrong or missing password?'.format(pem_file))
        _set_rsa_parameters(key_obj, pkey)
    elif byok_file:
        with open(byok_file, 'rb') as f:
            byok_data = f.read()
        key_obj.kty = 'RSA-HSM'
        key_obj.t = byok_data

    return self.client.import_key(
        self.keyvault_uri, key_name, key_obj, destination == 'hsm', key_attrs, tags)


def main():
    """Module entry point."""
    AzureRMKeyVaultKey()


if __name__ == '__main__':
    main()
+ required: True + type: str + name: + description: + - Key name. If not set, will list all keys in I(vault_uri). + type: str + version: + description: + - Key version. + - Set it to C(current) to show latest version of a key. + - Set it to C(all) to list all versions of a key. + - Set it to specific version to list specific version of a key. eg. fd2682392a504455b79c90dd04a1bf46. + default: current + type: str + show_deleted_key: + description: + - Set to C(true) to show deleted keys. Set to C(false) to show not deleted keys. + type: bool + default: false + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Get latest version of specific key + azure_rm_keyvaultkey_info: + vault_uri: "https://myVault.vault.azure.net" + name: myKey + + - name: List all versions of specific key + azure_rm_keyvaultkey_info: + vault_uri: "https://myVault.vault.azure.net" + name: myKey + version: all + + - name: List specific version of specific key + azure_rm_keyvaultkey_info: + vault_uri: "https://myVault.vault.azure.net" + name: myKey + version: fd2682392a504455b79c90dd04a1bf46 + + - name: List all keys in specific key vault + azure_rm_keyvaultkey_info: + vault_uri: "https://myVault.vault.azure.net" + + - name: List deleted keys in specific key vault + azure_rm_keyvaultkey_info: + vault_uri: "https://myVault.vault.azure.net" + show_deleted_key: True +''' + +RETURN = ''' +keyvaults: + description: + - List of keys in Azure Key Vault. + returned: always + type: complex + contains: + kid: + description: + - Key identifier. + returned: always + type: str + sample: "https://myVault.vault.azure.net/keys/key1/fd2682392a504455b79c90dd04a1bf46" + permitted_operations: + description: + - Permitted operations on the key. 
+ type: list + returned: always + sample: encrypt + type: + description: + - Key type. + type: str + returned: always + sample: RSA + version: + description: + - Key version. + type: str + returned: always + sample: fd2682392a504455b79c90dd04a1bf46 + key: + description: + - public part of a key. + contains: + n: + description: + - RSA modules. + type: str + e: + description: + - RSA public exponent. + type: str + crv: + description: + - Elliptic curve name. + type: str + x: + description: + - X component of an EC public key. + type: str + y: + description: + - Y component of an EC public key. + type: str + managed: + description: + - C(True) if the key's lifetime is managed by key vault. + type: bool + sample: True + tags: + description: + - Tags of the key. + returned: always + type: list + sample: [foo, ] + attributes: + description: + - Key attributes. + contains: + created: + description: + - Creation datetime. + returned: always + type: str + sample: "2019-04-25T07:26:49+00:00" + not_before: + description: + - Not before datetime. + type: str + sample: "2019-04-25T07:26:49+00:00" + expires: + description: + - Expiration datetime. + type: str + sample: "2019-04-25T07:26:49+00:00" + updated: + description: + - Update datetime. + returned: always + type: str + sample: "2019-04-25T07:26:49+00:00" + enabled: + description: + - Indicate whether the key is enabled. + returned: always + type: str + sample: true + recovery_level: + description: + - Reflects the deletion recovery level currently in effect for keys in the current vault. + - If it contains C(Purgeable) the key can be permanently deleted by a privileged user. + - Otherwise, only the system can purge the key, at the end of the retention interval. 
def keybundle_to_dict(bundle):
    """Convert a KeyBundle returned by the Key Vault SDK into the module's result dict."""
    return dict(
        tags=bundle.tags,
        managed=bundle.managed,
        attributes=dict(
            enabled=bundle.attributes.enabled,
            not_before=bundle.attributes.not_before,
            expires=bundle.attributes.expires,
            created=bundle.attributes.created,
            updated=bundle.attributes.updated,
            recovery_level=bundle.attributes.recovery_level
        ),
        kid=bundle.key.kid,
        version=KeyVaultId.parse_key_id(bundle.key.kid).version,
        type=bundle.key.kty,
        permitted_operations=bundle.key.key_ops,
        # EC keys have no n/e, RSA keys have no crv/x/y -- default each to None
        key=dict(
            n=getattr(bundle.key, 'n', None),
            e=getattr(bundle.key, 'e', None),
            crv=getattr(bundle.key, 'crv', None),
            x=getattr(bundle.key, 'x', None),
            y=getattr(bundle.key, 'y', None)
        )
    )


def deletedkeybundle_to_dict(bundle):
    """Like keybundle_to_dict, plus the deletion/recovery fields."""
    keybundle = keybundle_to_dict(bundle)
    # FIX: trailing commas previously turned these two values into 1-tuples
    keybundle['recovery_id'] = bundle.recovery_id
    keybundle['scheduled_purge_date'] = bundle.scheduled_purge_date
    keybundle['deleted_date'] = bundle.deleted_date
    return keybundle


def keyitem_to_dict(keyitem):
    """Convert a KeyItem (list entry) into the module's result dict."""
    item = dict(
        kid=keyitem.kid,
        version=KeyVaultId.parse_key_id(keyitem.kid).version,
        tags=keyitem.tags,
        managed=keyitem.managed,  # FIX: key was misspelled 'manged'
        attributes=dict(
            enabled=keyitem.attributes.enabled,
            not_before=keyitem.attributes.not_before,
            expires=keyitem.attributes.expires,
            created=keyitem.attributes.created,
            updated=keyitem.attributes.updated,
            recovery_level=keyitem.attributes.recovery_level
        )
    )
    # Keep the original misspelled key too so existing playbooks keep working.
    item['manged'] = keyitem.managed
    return item


def deletedkeyitem_to_dict(keyitem):
    """Like keyitem_to_dict, plus the deletion/recovery fields."""
    item = keyitem_to_dict(keyitem)
    # FIX: trailing commas previously turned these two values into 1-tuples
    item['recovery_id'] = keyitem.recovery_id
    item['scheduled_purge_date'] = keyitem.scheduled_purge_date
    item['deleted_date'] = keyitem.deleted_date
    return item


class AzureRMKeyVaultKeyInfo(AzureRMModuleBase):

    def __init__(self):
        # Module argument schema; vault_uri is the only required parameter.
        self.module_arg_spec = dict(
            version=dict(type='str', default='current'),
            name=dict(type='str'),
            vault_uri=dict(type='str', required=True),
            show_deleted_key=dict(type='bool', default=False),
            tags=dict(type='list', elements='str')
        )

        self.vault_uri = None
        self.name = None
        self.version = None
        self.show_deleted_key = False
        self.tags = None

        self.results = dict(changed=False)
        self._client = None

        super(AzureRMKeyVaultKeyInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=False,
                                                     facts_module=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy validated module parameters onto the instance.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        self._client = self.get_keyvault_client()

        # Dispatch on name/deleted/version to the appropriate query helper.
        if self.name:
            if self.show_deleted_key:
                self.results['keys'] = self.get_deleted_key()
            elif self.version == 'all':
                self.results['keys'] = self.get_key_versions()
            else:
                self.results['keys'] = self.get_key()
        elif self.show_deleted_key:
            self.results['keys'] = self.list_deleted_keys()
        else:
            self.results['keys'] = self.list_keys()

        return self.results
+ if self.module.params['auth_source'] == 'msi': + try: + self.log("Get KeyVaultClient from MSI") + credentials = MSIAuthentication(resource="https://{0}".format(kv_url)) + return KeyVaultClient(credentials) + except Exception: + self.log("Get KeyVaultClient from service principal") + elif (self.module.params['auth_source'] == 'cli' + or (self.module.params['auth_source'] == 'auto' + and self.credentials['client_id'] is None + and self.credentials['secret'] is None)): + try: + profile = get_cli_profile() + credentials, subscription_id, tenant = profile.get_login_credentials( + subscription_id=self.credentials['subscription_id'], resource="https://{0}".format(kv_url)) + return KeyVaultClient(credentials) + except Exception as exc: + self.log("Get KeyVaultClient from service principal") + # self.fail("Failed to load CLI profile {0}.".format(str(exc))) + + # Create KeyVault Client using KeyVault auth class and auth_callback + def auth_callback(server, resource, scope): + if self.credentials['client_id'] is None or self.credentials['secret'] is None: + self.fail('Please specify client_id, secret and tenant to access azure Key Vault.') + + tenant = self.credentials.get('tenant') + if not self.credentials['tenant']: + tenant = "common" + + authcredential = ServicePrincipalCredentials( + client_id=self.credentials['client_id'], + secret=self.credentials['secret'], + tenant=tenant, + cloud_environment=self._cloud_environment, + resource="https://{0}".format(kv_url)) + + token = authcredential.token + return token['token_type'], token['access_token'] + + return KeyVaultClient(KeyVaultAuthentication(auth_callback)) + + def get_key(self): + ''' + Gets the properties of the specified key in key vault. 
+ + :return: deserialized key state dictionary + ''' + self.log("Get the key {0}".format(self.name)) + + results = [] + try: + if self.version == 'current': + response = self._client.get_key(vault_base_url=self.vault_uri, + key_name=self.name, + key_version='') + else: + response = self._client.get_key(vault_base_url=self.vault_uri, + key_name=self.name, + key_version=self.version) + + if response and self.has_tags(response.tags, self.tags): + self.log("Response : {0}".format(response)) + results.append(keybundle_to_dict(response)) + + except Exception as e: + self.log("Did not find the key vault key {0}: {1}".format(self.name, str(e))) + return results + + def get_key_versions(self): + ''' + Lists keys versions. + + :return: deserialized versions of key, includes key identifier, attributes and tags + ''' + self.log("Get the key versions {0}".format(self.name)) + + results = [] + try: + response = self._client.get_key_versions(vault_base_url=self.vault_uri, + key_name=self.name) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(keyitem_to_dict(item)) + except Exception as e: + self.log("Did not find key versions {0} : {1}.".format(self.name, str(e))) + return results + + def list_keys(self): + ''' + Lists keys in specific key vault. + + :return: deserialized keys, includes key identifier, attributes and tags. + ''' + self.log("Get the key vaults in current subscription") + + results = [] + try: + response = self._client.get_keys(vault_base_url=self.vault_uri) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(keyitem_to_dict(item)) + except Exception as e: + self.log("Did not find key vault in current subscription {0}.".format(str(e))) + return results + + def get_deleted_key(self): + ''' + Gets the properties of the specified deleted key in key vault. 
+ + :return: deserialized key state dictionary + ''' + self.log("Get the key {0}".format(self.name)) + + results = [] + try: + response = self._client.get_deleted_key(vault_base_url=self.vault_uri, + key_name=self.name) + + if response and self.has_tags(response.tags, self.tags): + self.log("Response : {0}".format(response)) + results.append(deletedkeybundle_to_dict(response)) + + except Exception as e: + self.log("Did not find the key vault key {0}: {1}".format(self.name, str(e))) + return results + + def list_deleted_keys(self): + ''' + Lists deleted keys in specific key vault. + + :return: deserialized keys, includes key identifier, attributes and tags. + ''' + self.log("Get the key vaults in current subscription") + + results = [] + try: + response = self._client.get_deleted_keys(vault_base_url=self.vault_uri) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(deletedkeyitem_to_dict(item)) + except Exception as e: + self.log("Did not find key vault in current subscription {0}.".format(str(e))) + return results + + +def main(): + """Main execution""" + AzureRMKeyVaultKeyInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret.py new file mode 100644 index 000000000..0a5288abb --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_keyvaultsecret +version_added: "0.1.2" +short_description: Use Azure KeyVault Secrets +description: + - Create or delete a secret within 
a given keyvault. + - By using Key Vault, you can encrypt keys and secrets. + - Such as authentication keys, storage account keys, data encryption keys, .PFX files, and passwords. +options: + keyvault_uri: + description: + - URI of the keyvault endpoint. + required: true + content_type: + description: + - Type of the secret value such as a password. + type: str + secret_name: + description: + - Name of the keyvault secret. + required: true + secret_value: + description: + - Secret to be secured by keyvault. + secret_expiry: + description: + - Optional expiry datetime for secret + type: str + secret_valid_from: + description: + - Optional valid-from datetime for secret + type: str + state: + description: + - Assert the state of the subnet. Use C(present) to create or update a secret and C(absent) to delete a secret . + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Ian Philpot (@iphilpot) + +''' + +EXAMPLES = ''' + - name: Create a secret + azure_rm_keyvaultsecret: + secret_name: MySecret + secret_value: My_Pass_Sec + keyvault_uri: https://contoso.vault.azure.net/ + tags: + testing: testing + delete: never + + - name: Delete a secret + azure_rm_keyvaultsecret: + secret_name: MySecret + keyvault_uri: https://contoso.vault.azure.net/ + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the secret. + returned: success + type: complex + contains: + secret_id: + description: + - Secret resource path. 
+ type: str + example: https://contoso.vault.azure.net/secrets/hello/e924f053839f4431b35bc54393f98423 +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId + from azure.common.credentials import ServicePrincipalCredentials, get_cli_profile + from msrestazure.azure_active_directory import MSIAuthentication + import dateutil.parser + from azure.keyvault.models import SecretAttributes + # from azure.keyvault.models.secret_attributes import SecretAttributes +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMKeyVaultSecret(AzureRMModuleBase): + ''' Module that creates or deletes secrets in Azure KeyVault ''' + + def __init__(self): + + self.module_arg_spec = dict( + secret_name=dict(type='str', required=True), + secret_value=dict(type='str', no_log=True), + secret_valid_from=dict(type='str', no_log=True), + secret_expiry=dict(type='str', no_log=True), + keyvault_uri=dict(type='str', no_log=True, required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + content_type=dict(type='str') + ) + + required_if = [ + ('state', 'present', ['secret_value']) + ] + + self.results = dict( + changed=False, + state=dict() + ) + + self.secret_name = None + self.secret_value = None + self.secret_valid_from = None + self.secret_expiry = None + self.keyvault_uri = None + self.state = None + self.data_creds = None + self.client = None + self.tags = None + self.content_type = None + + super(AzureRMKeyVaultSecret, self).__init__(self.module_arg_spec, + supports_check_mode=True, + required_if=required_if, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + # Create KeyVault Client + self.client = self.get_keyvault_client() + + results = dict() + changed = False + + try: + 
results = self.get_secret(self.secret_name) + + # Secret exists and will be deleted + if self.state == 'absent': + changed = True + elif self.secret_value and results['secret_value'] != self.secret_value: + changed = True + + except Exception: + # Secret doesn't exist + if self.state == 'present': + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + valid_from = self.secret_valid_from + if isinstance(valid_from, str) and len(valid_from) > 0: + valid_from = dateutil.parser.parse(valid_from) + + expiry = self.secret_expiry + if isinstance(expiry, str) and len(expiry) > 0: + expiry = dateutil.parser.parse(expiry) + + if not self.check_mode: + # Create secret + if self.state == 'present' and changed: + results['secret_id'] = self.create_update_secret(self.secret_name, self.secret_value, self.tags, self.content_type, valid_from, expiry) + self.results['state'] = results + self.results['state']['status'] = 'Created' + # Delete secret + elif self.state == 'absent' and changed: + results['secret_id'] = self.delete_secret(self.secret_name) + self.results['state'] = results + self.results['state']['status'] = 'Deleted' + else: + if self.state == 'present' and changed: + self.results['state']['status'] = 'Created' + elif self.state == 'absent' and changed: + self.results['state']['status'] = 'Deleted' + + return self.results + + def get_keyvault_client(self): + kv_url = self.azure_auth._cloud_environment.suffixes.keyvault_dns.split('.', 1).pop() + # Don't use MSI credentials if the auth_source isn't set to MSI. The below will Always result in credentials when running on an Azure VM. 
+ if self.module.params['auth_source'] == 'msi': + try: + self.log("Get KeyVaultClient from MSI") + credentials = MSIAuthentication(resource="https://{0}".format(kv_url)) + return KeyVaultClient(credentials) + except Exception: + self.log("Get KeyVaultClient from service principal") + elif (self.module.params['auth_source'] == 'cli' + or (self.module.params['auth_source'] == 'auto' + and self.credentials['client_id'] is None + and self.credentials['secret'] is None)): + try: + profile = get_cli_profile() + credentials, subscription_id, tenant = profile.get_login_credentials( + subscription_id=self.credentials['subscription_id'], resource="https://{0}".format(kv_url)) + return KeyVaultClient(credentials) + except Exception as exc: + self.log("Get KeyVaultClient from service principal") + # self.fail("Failed to load CLI profile {0}.".format(str(exc))) + + # Create KeyVault Client using KeyVault auth class and auth_callback + def auth_callback(server, resource, scope): + if self.credentials['client_id'] is None or self.credentials['secret'] is None: + self.fail('Please specify client_id, secret and tenant to access azure Key Vault.') + + tenant = self.credentials.get('tenant') + if not self.credentials['tenant']: + tenant = "common" + + authcredential = ServicePrincipalCredentials( + client_id=self.credentials['client_id'], + secret=self.credentials['secret'], + tenant=tenant, + cloud_environment=self._cloud_environment, + resource="https://{0}".format(kv_url)) + + token = authcredential.token + return token['token_type'], token['access_token'] + + return KeyVaultClient(KeyVaultAuthentication(auth_callback)) + + def get_secret(self, name, version=''): + ''' Gets an existing secret ''' + secret_bundle = self.client.get_secret(self.keyvault_uri, name, version) + if secret_bundle: + secret_id = KeyVaultId.parse_secret_id(secret_bundle.id) + return dict(secret_id=secret_id.id, secret_value=secret_bundle.value) + return None + + def create_update_secret(self, name, secret, 
tags, content_type, valid_from, expiry): + ''' Creates/Updates a secret ''' + secret_attributes = SecretAttributes(expires=expiry, not_before=valid_from) + secret_bundle = self.client.set_secret(self.keyvault_uri, name, secret, tags=tags, content_type=content_type, secret_attributes=secret_attributes) + secret_id = KeyVaultId.parse_secret_id(secret_bundle.id) + return secret_id.id + + def delete_secret(self, name): + ''' Deletes a secret ''' + deleted_secret = self.client.delete_secret(self.keyvault_uri, name) + secret_id = KeyVaultId.parse_secret_id(deleted_secret.id) + return secret_id.id + + +def main(): + AzureRMKeyVaultSecret() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret_info.py new file mode 100644 index 000000000..a785b8ce7 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_keyvaultsecret_info.py @@ -0,0 +1,444 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Jose Angel Munoz, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_keyvaultsecret_info +version_added: "0.1.2" +short_description: Get Azure Key Vault secret facts +description: + - Get facts of Azure Key Vault secret. + +options: + vault_uri: + description: + - Vault uri where the secret stored in. + required: True + type: str + name: + description: + - Secret name. If not set, will list all secrets in vault_uri. + type: str + version: + description: + - Secret version. + - Set it to C(current) to show latest version of a secret. + - Set it to C(all) to list all versions of a secret. + - Set it to specific version to list specific version of a secret. eg. 
fd2682392a504455b79c90dd04a1bf46 + default: current + type: str + show_deleted_secret: + description: + - Set to I(show_delete_secret=true) to show deleted secrets. Set to I(show_deleted_secret=false) to show not deleted secrets. + type: bool + default: false + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Jose Angel Munoz (@imjoseangel) + +''' + +EXAMPLES = ''' + - name: Get latest version of specific secret + azure_rm_keyvaultsecret_info: + vault_uri: "https://myVault.vault.azure.net" + name: mySecret + + - name: List all versions of specific secret + azure_rm_keyvaultsecret_info: + vault_uri: "https://myVault.vault.azure.net" + name: mySecret + version: all + + - name: List specific version of specific secret + azure_rm_keyvaultsecret_info: + vault_uri: "https://myVault.vault.azure.net" + name: mySecret + version: fd2682392a504455b79c90dd04a1bf46 + + - name: List all secrets in specific key vault + azure_rm_keyvaultsecret_info: + vault_uri: "https://myVault.vault.azure.net" + + - name: List deleted secrets in specific key vault + azure_rm_keyvaultsecret_info: + vault_uri: "https://myVault.vault.azure.net" + show_deleted_secret: True +''' + +RETURN = ''' +secrets: + description: + - List of secrets in Azure Key Vault. + returned: always + type: complex + contains: + sid: + description: + - Secret identifier. + returned: always + type: str + sample: "https://myVault.vault.azure.net/flexsecret/secret1/fd2682392a504455b79c90dd04a1bf46" + version: + description: + - Secret version. + type: str + returned: always + sample: fd2682392a504455b79c90dd04a1bf46 + secret: + description: secret value. + type: str + returned: always + sample: mysecretvault + tags: + description: + - Tags of the secret. 
+ returned: always + type: dict + sample: {"delete": "on-exit"} + content_type: + description: + - Content type (optional) + returned: always + type: str + sample: mysecrettype + attributes: + description: + - Secret attributes. + type: dict + contains: + created: + description: + - Creation datetime. + returned: always + type: str + sample: "2019-04-25T07:26:49+00:00" + not_before: + description: + - Not before datetime. + type: str + sample: "2019-04-25T07:26:49+00:00" + expires: + description: + - Expiration datetime. + type: str + sample: "2019-04-25T07:26:49+00:00" + updated: + description: + - Update datetime. + returned: always + type: str + sample: "2019-04-25T07:26:49+00:00" + enabled: + description: + - Indicate whether the secret is enabled. + returned: always + type: str + sample: true + recovery_level: + description: + - Reflects the deletion recovery level currently in effect for secrets in the current vault. + - If it contains 'Purgeable' the secret can be permanently deleted by a privileged user, + - Otherwise, only the system can purge the secret, at the end of the retention interval. 
def secretbundle_to_dict(bundle):
    """Convert a SecretBundle returned by the Key Vault SDK into the module's result dict."""
    return dict(tags=bundle.tags,
                attributes=dict(
                    enabled=bundle.attributes.enabled,
                    not_before=bundle.attributes.not_before,
                    expires=bundle.attributes.expires,
                    created=bundle.attributes.created,
                    updated=bundle.attributes.updated,
                    recovery_level=bundle.attributes.recovery_level),
                sid=bundle.id,
                version=KeyVaultId.parse_secret_id(bundle.id).version,
                content_type=bundle.content_type,
                secret=bundle.value)


def deletedsecretbundle_to_dict(bundle):
    """Like secretbundle_to_dict, plus the deletion/recovery fields."""
    secretbundle = secretbundle_to_dict(bundle)
    # FIX: trailing commas previously turned these two values into 1-tuples
    secretbundle['recovery_id'] = bundle.recovery_id
    secretbundle['scheduled_purge_date'] = bundle.scheduled_purge_date
    secretbundle['deleted_date'] = bundle.deleted_date
    return secretbundle


def secretitem_to_dict(secretitem):
    """Convert a SecretItem (list entry) into the module's result dict."""
    return dict(sid=secretitem.id,
                version=KeyVaultId.parse_secret_id(secretitem.id).version,
                tags=secretitem.tags,
                attributes=dict(
                    enabled=secretitem.attributes.enabled,
                    not_before=secretitem.attributes.not_before,
                    expires=secretitem.attributes.expires,
                    created=secretitem.attributes.created,
                    updated=secretitem.attributes.updated,
                    recovery_level=secretitem.attributes.recovery_level))


def deletedsecretitem_to_dict(secretitem):
    """Like secretitem_to_dict, plus the deletion/recovery fields."""
    item = secretitem_to_dict(secretitem)
    # FIX: trailing commas previously turned these two values into 1-tuples
    item['recovery_id'] = secretitem.recovery_id
    item['scheduled_purge_date'] = secretitem.scheduled_purge_date
    item['deleted_date'] = secretitem.deleted_date
    return item
class AzureRMKeyVaultSecretInfo(AzureRMModuleBase):

    def __init__(self):
        # Module argument schema; vault_uri is the only required parameter.
        self.module_arg_spec = dict(version=dict(type='str',
                                                 default='current'),
                                    name=dict(type='str'),
                                    vault_uri=dict(type='str', required=True),
                                    show_deleted_secret=dict(type='bool',
                                                             default=False),
                                    tags=dict(type='list', elements='str'))

        self.vault_uri = None
        self.name = None
        self.version = None
        self.show_deleted_secret = False
        self.tags = None

        self.results = dict(changed=False)
        self._client = None

        super(AzureRMKeyVaultSecretInfo,
              self).__init__(derived_arg_spec=self.module_arg_spec,
                             supports_check_mode=True,
                             supports_tags=False,
                             facts_module=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy validated module parameters onto the instance.
        for param in list(self.module_arg_spec.keys()):
            if hasattr(self, param):
                setattr(self, param, kwargs[param])

        self._client = self.get_keyvault_client()

        # Dispatch on name/deleted/version to the appropriate query helper.
        if self.name:
            if self.show_deleted_secret:
                self.results['secrets'] = self.get_deleted_secret()
            elif self.version == 'all':
                self.results['secrets'] = self.get_secret_versions()
            else:
                self.results['secrets'] = self.get_secret()
        elif self.show_deleted_secret:
            self.results['secrets'] = self.list_deleted_secrets()
        else:
            self.results['secrets'] = self.list_secrets()

        return self.results

    def get_keyvault_client(self):
        """Build a KeyVaultClient via MSI, the Azure CLI profile, or a service principal."""
        kv_url = self.azure_auth._cloud_environment.suffixes.keyvault_dns.split('.', 1).pop()
        # Don't use MSI credentials if the auth_source isn't set to MSI. The below will Always result in credentials when running on an Azure VM.
        if self.module.params['auth_source'] == 'msi':
            try:
                self.log("Get KeyVaultClient from MSI")
                # FIX (consistency): use kv_url computed above -- the sibling
                # keyvault modules do the same; re-deriving it here duplicated
                # the identical expression.
                credentials = MSIAuthentication(resource="https://{0}".format(kv_url))
                return KeyVaultClient(credentials)
            except Exception:
                self.log("Get KeyVaultClient from service principal")
        elif (self.module.params['auth_source'] == 'cli'
              or (self.module.params['auth_source'] == 'auto'
                  and self.credentials['client_id'] is None
                  and self.credentials['secret'] is None)):
            try:
                profile = get_cli_profile()
                credentials, subscription_id, tenant = profile.get_login_credentials(
                    subscription_id=self.credentials['subscription_id'], resource="https://{0}".format(kv_url))
                return KeyVaultClient(credentials)
            except Exception as exc:
                self.log("Get KeyVaultClient from service principal")
                # self.fail("Failed to load CLI profile {0}.".format(str(exc)))

        # Fall back: create KeyVault Client using KeyVault auth class and auth_callback
        def auth_callback(server, resource, scope):
            if self.credentials['client_id'] is None or self.credentials[
                    'secret'] is None:
                self.fail(
                    'Please specify client_id, secret and tenant to access azure Key Vault.'
                )

            # Fall back to the 'common' endpoint when no tenant is configured.
            tenant = self.credentials.get('tenant')
            if not self.credentials['tenant']:
                tenant = "common"

            authcredential = ServicePrincipalCredentials(
                client_id=self.credentials['client_id'],
                secret=self.credentials['secret'],
                tenant=tenant,
                cloud_environment=self._cloud_environment,
                resource="https://{0}".format(kv_url))

            token = authcredential.token
            return token['token_type'], token['access_token']

        return KeyVaultClient(KeyVaultAuthentication(auth_callback))
+ + :return: deserialized secret state dictionary + ''' + self.log("Get the secret {0}".format(self.name)) + + results = [] + try: + if self.version == 'current': + response = self._client.get_secret( + vault_base_url=self.vault_uri, + secret_name=self.name, + secret_version='') + else: + response = self._client.get_secret( + vault_base_url=self.vault_uri, + secret_name=self.name, + secret_version=self.version) + + if response and self.has_tags(response.tags, self.tags): + self.log("Response : {0}".format(response)) + results.append(secretbundle_to_dict(response)) + + except Exception as e: + self.log("Did not find the key vault secret {0}: {1}".format( + self.name, str(e))) + return results + + def get_secret_versions(self): + ''' + Lists secrets versions. + + :return: deserialized versions of secret, includes secret identifier, attributes and tags + ''' + self.log("Get the secret versions {0}".format(self.name)) + + results = [] + try: + response = self._client.get_secret_versions( + vault_base_url=self.vault_uri, secret_name=self.name) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(secretitem_to_dict(item)) + except Exception as e: + self.log("Did not find secret versions {0} : {1}.".format( + self.name, str(e))) + return results + + def list_secrets(self): + ''' + Lists secrets in specific key vault. + + :return: deserialized secrets, includes secret identifier, attributes and tags. 
+ ''' + self.log("Get the key vaults in current subscription") + + results = [] + try: + response = self._client.get_secrets(vault_base_url=self.vault_uri) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(secretitem_to_dict(item)) + except Exception as e: + self.log( + "Did not find key vault in current subscription {0}.".format( + str(e))) + return results + + def get_deleted_secret(self): + ''' + Gets the properties of the specified deleted secret in key vault. + + :return: deserialized secret state dictionary + ''' + self.log("Get the secret {0}".format(self.name)) + + results = [] + try: + response = self._client.get_deleted_secret( + vault_base_url=self.vault_uri, secret_name=self.name) + + if response and self.has_tags(response.tags, self.tags): + self.log("Response : {0}".format(response)) + results.append(deletedsecretbundle_to_dict(response)) + + except Exception as e: + self.log("Did not find the key vault secret {0}: {1}".format( + self.name, str(e))) + return results + + def list_deleted_secrets(self): + ''' + Lists deleted secrets in specific key vault. + + :return: deserialized secrets, includes secret identifier, attributes and tags. 
+ ''' + self.log("Get the key vaults in current subscription") + + results = [] + try: + response = self._client.get_deleted_secrets( + vault_base_url=self.vault_uri) + self.log("Response : {0}".format(response)) + + if response: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(deletedsecretitem_to_dict(item)) + except Exception as e: + self.log( + "Did not find key vault in current subscription {0}.".format( + str(e))) + return results + + +def main(): + """Main execution""" + AzureRMKeyVaultSecretInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py new file mode 100644 index 000000000..86c4335f8 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py @@ -0,0 +1,1075 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2016, Thomas Stringer +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_loadbalancer + +version_added: "0.1.2" + +short_description: Manage Azure load balancers + +description: + - Create, update and delete Azure load balancers. + +options: + resource_group: + description: + - Name of a resource group where the load balancer exists or will be created. + required: true + name: + description: + - Name of the load balancer. + required: true + state: + description: + - Assert the state of the load balancer. Use C(present) to create/update a load balancer, or C(absent) to delete one. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + sku: + description: + - The load balancer SKU. 
+ choices: + - Basic + - Standard + frontend_ip_configurations: + description: + - List of frontend IPs to be used. + suboptions: + name: + description: + - Name of the frontend ip configuration. + required: True + public_ip_address: + description: + - Name of an existing public IP address object in the current resource group to associate with the security group. + private_ip_address: + description: + - The reference of the Public IP resource. + private_ip_allocation_method: + description: + - The Private IP allocation method. + choices: + - Static + - Dynamic + subnet: + description: + - The reference of the subnet resource. + - Should be an existing subnet's resource id. + zones: + description: + - list of availability zones denoting the IP allocated for the resource needs to come from. + - This must be specified I(sku=Standard) and I(subnet) when setting zones. + type: list + elements: str + backend_address_pools: + description: + - List of backend address pools. + suboptions: + name: + description: + - Name of the backend address pool. + required: True + probes: + description: + - List of probe definitions used to check endpoint health. + suboptions: + name: + description: + - Name of the probe. + required: True + port: + description: + - Probe port for communicating the probe. Possible values range from 1 to 65535, inclusive. + required: True + protocol: + description: + - The protocol of the end point to be probed. + - If C(Tcp) is specified, a received ACK is required for the probe to be successful. + - If C(Http) or C(Https) is specified, a 200 OK response from the specified URL is required for the probe to be successful. + choices: + - Tcp + - Http + - Https + interval: + description: + - The interval, in seconds, for how frequently to probe the endpoint for health status. + - Slightly less than half the allocated timeout period, which allows two full probes before taking the instance out of rotation. 
+ - The default value is C(15), the minimum value is C(5). + default: 15 + fail_count: + description: + - The number of probes where if no response, will result in stopping further traffic from being delivered to the endpoint. + - This values allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure. + default: 3 + aliases: + - number_of_probes + request_path: + description: + - The URI used for requesting health status from the VM. + - Path is required if I(protocol=Http) or I(protocol=Https). Otherwise, it is not allowed. + inbound_nat_pools: + description: + - Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. + - Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. + - Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. + - Inbound NAT pools are referenced from virtual machine scale sets. + - NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. + - They have to reference individual inbound NAT rules. + suboptions: + name: + description: + - Name of the inbound NAT pool. + required: True + frontend_ip_configuration_name: + description: + - A reference to frontend IP addresses. + required: True + protocol: + description: + - IP protocol for the NAT pool. + choices: + - Tcp + - Udp + - All + frontend_port_range_start: + description: + - The first port in the range of external ports that will be used to provide inbound NAT to NICs associated with the load balancer. + - Acceptable values range between 1 and 65534. + required: True + frontend_port_range_end: + description: + - The last port in the range of external ports that will be used to provide inbound NAT to NICs associated with the load balancer. + - Acceptable values range between 1 and 65535. 
+ required: True + backend_port: + description: + - The port used for internal connections on the endpoint. + - Acceptable values are between 1 and 65535. + load_balancing_rules: + description: + - Object collection representing the load balancing rules Gets the provisioning. + suboptions: + name: + description: + - Name of the load balancing rule. + required: True + frontend_ip_configuration: + description: + - A reference to frontend IP addresses. + required: True + backend_address_pool: + description: + - A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs. + required: True + probe: + description: + - The name of the load balancer probe this rule should use for health checks. + required: True + protocol: + description: + - IP protocol for the load balancing rule. + choices: + - Tcp + - Udp + - All + load_distribution: + description: + - The session persistence policy for this rule; C(Default) is no persistence. + choices: + - Default + - SourceIP + - SourceIPProtocol + default: Default + frontend_port: + description: + - The port for the external endpoint. + - Frontend port numbers must be unique across all rules within the load balancer. + - Acceptable values are between 0 and 65534. + - Note that value 0 enables "Any Port". + backend_port: + description: + - The port used for internal connections on the endpoint. + - Acceptable values are between 0 and 65535. + - Note that value 0 enables "Any Port". + idle_timeout: + description: + - The timeout for the TCP idle connection. + - The value can be set between 4 and 30 minutes. + - The default value is C(4) minutes. + - This element is only used when the protocol is set to TCP. + enable_floating_ip: + description: + - Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. + disable_outbound_snat: + description: + - Configure outbound source network address translation (SNAT). 
+ - The default behavior when omitted is equivalent to I(disable_outbound_snat=True). + - True is equivalent to "(Recommended) Use outbound rules to provide backend pool members access to the internet" in portal. + - False is equivalent to "Use default outbound access" in portal. + inbound_nat_rules: + description: + - Collection of inbound NAT Rules used by a load balancer. + - Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. + - Inbound NAT pools are referenced from virtual machine scale sets. + - NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. + - They have to reference individual inbound NAT rules. + suboptions: + name: + description: + - name of the inbound nat rule. + required: True + frontend_ip_configuration: + description: + - A reference to frontend IP addresses. + required: True + protocol: + description: + - IP protocol for the inbound nat rule. + choices: + - Tcp + - Udp + - All + frontend_port: + description: + - The port for the external endpoint. + - Frontend port numbers must be unique across all rules within the load balancer. + - Acceptable values are between 0 and 65534. + - Note that value 0 enables "Any Port". + backend_port: + description: + - The port used for internal connections on the endpoint. + - Acceptable values are between 0 and 65535. + - Note that value 0 enables "Any Port". + idle_timeout: + description: + - The timeout for the TCP idle connection. + - The value can be set between 4 and 30 minutes. + - The default value is C(4) minutes. + - This element is only used when I(protocol=Tcp). + enable_floating_ip: + description: + - Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. + - This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. + - This setting can't be changed after you create the endpoint. 
+ enable_tcp_reset: + description: + - Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. + - This element is only used when I(protocol=Tcp). + public_ip_address_name: + description: + - (deprecated) Name of an existing public IP address object to associate with the security group. + - This option has been deprecated, and will be removed in 2.9. Use I(frontend_ip_configurations) instead. + aliases: + - public_ip_address + - public_ip_name + - public_ip + probe_port: + description: + - (deprecated) The port that the health probe will use. + - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead. + probe_protocol: + description: + - (deprecated) The protocol to use for the health probe. + - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead. + choices: + - Tcp + - Http + - Https + probe_interval: + description: + - (deprecated) Time (in seconds) between endpoint health probes. + - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead. + default: 15 + probe_fail_count: + description: + - (deprecated) The amount of probe failures for the load balancer to make a health determination. + - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead. + default: 3 + probe_request_path: + description: + - (deprecated) The URL that an HTTP probe or HTTPS probe will use (only relevant if I(probe_protocol=Http) or I(probe_protocol=Https)). + - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead. + protocol: + description: + - (deprecated) The protocol (TCP or UDP) that the load balancer will use. + - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead. + choices: + - Tcp + - Udp + load_distribution: + description: + - (deprecated) The type of load distribution that the load balancer will employ. 
+ - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead. + choices: + - Default + - SourceIP + - SourceIPProtocol + frontend_port: + description: + - (deprecated) Frontend port that will be exposed for the load balancer. + - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead. + backend_port: + description: + - (deprecated) Backend port that will be exposed for the load balancer. + - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead. + idle_timeout: + description: + - (deprecated) Timeout for TCP idle connection in minutes. + - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead. + default: 4 + natpool_frontend_port_start: + description: + - (deprecated) Start of the port range for a NAT pool. + - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead. + natpool_frontend_port_end: + description: + - (deprecated) End of the port range for a NAT pool. + - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead. + natpool_backend_port: + description: + - (deprecated) Backend port used by the NAT pool. + - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead. + natpool_protocol: + description: + - (deprecated) The protocol for the NAT pool. + - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead. 
+extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Thomas Stringer (@trstringer) + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' +- name: create load balancer + azure_rm_loadbalancer: + resource_group: myResourceGroup + name: testloadbalancer1 + frontend_ip_configurations: + - name: frontendipconf0 + public_ip_address: testpip + backend_address_pools: + - name: backendaddrpool0 + probes: + - name: prob0 + port: 80 + inbound_nat_pools: + - name: inboundnatpool0 + frontend_ip_configuration_name: frontendipconf0 + protocol: Tcp + frontend_port_range_start: 80 + frontend_port_range_end: 81 + backend_port: 8080 + load_balancing_rules: + - name: lbrbalancingrule0 + frontend_ip_configuration: frontendipconf0 + backend_address_pool: backendaddrpool0 + frontend_port: 80 + backend_port: 80 + probe: prob0 + inbound_nat_rules: + - name: inboundnatrule0 + backend_port: 8080 + protocol: Tcp + frontend_port: 8080 + frontend_ip_configuration: frontendipconf0 +''' + +RETURN = ''' +state: + description: + - Current state of the load balancer. + returned: always + type: dict +changed: + description: + - Whether or not the resource has changed. 
+ returned: always + type: bool +''' + +import random +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id +from ansible.module_utils._text import to_native +try: + from msrestazure.tools import parse_resource_id + from msrestazure.azure_exceptions import CloudError + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +frontend_ip_configuration_spec = dict( + name=dict( + type='str', + required=True + ), + public_ip_address=dict( + type='str' + ), + private_ip_address=dict( + type='str' + ), + private_ip_allocation_method=dict( + type='str' + ), + subnet=dict( + type='str' + ), + zones=dict( + type='list', + elements='str' + ) +) + + +backend_address_pool_spec = dict( + name=dict( + type='str', + required=True + ) +) + + +probes_spec = dict( + name=dict( + type='str', + required=True + ), + port=dict( + type='int', + required=True + ), + protocol=dict( + type='str', + choices=['Tcp', 'Http', 'Https'] + ), + interval=dict( + type='int', + default=15 + ), + fail_count=dict( + type='int', + default=3, + aliases=['number_of_probes'] + ), + request_path=dict( + type='str' + ) +) + + +inbound_nat_pool_spec = dict( + name=dict( + type='str', + required=True + ), + frontend_ip_configuration_name=dict( + type='str', + required=True + ), + protocol=dict( + type='str', + choices=['Tcp', 'Udp', 'All'] + ), + frontend_port_range_start=dict( + type='int', + required=True + ), + frontend_port_range_end=dict( + type='int', + required=True + ), + backend_port=dict( + type='int', + required=True + ) +) + + +inbound_nat_rule_spec = dict( + name=dict( + type='str', + required=True + ), + frontend_ip_configuration=dict( + type='str', + required=True + ), + protocol=dict( + type='str', + choices=['Tcp', 'Udp', 'All'] + ), + frontend_port=dict( + type='int', + required=True + ), + idle_timeout=dict( + type='int' + ), + backend_port=dict( + 
type='int', + required=True + ), + enable_floating_ip=dict( + type='bool' + ), + enable_tcp_reset=dict( + type='bool' + ) +) + + +load_balancing_rule_spec = dict( + name=dict( + type='str', + required=True + ), + frontend_ip_configuration=dict( + type='str', + required=True + ), + backend_address_pool=dict( + type='str', + required=True + ), + probe=dict( + type='str', + required=True + ), + protocol=dict( + type='str', + choices=['Tcp', 'Udp', 'All'] + ), + load_distribution=dict( + type='str', + choices=['Default', 'SourceIP', 'SourceIPProtocol'], + default='Default' + ), + frontend_port=dict( + type='int', + required=True + ), + backend_port=dict( + type='int' + ), + idle_timeout=dict( + type='int', + default=4 + ), + enable_floating_ip=dict( + type='bool' + ), + disable_outbound_snat=dict( + type='bool', + default=None + ), +) + + +class AzureRMLoadBalancer(AzureRMModuleBase): + """Configuration class for an Azure RM load balancer resource""" + + def __init__(self): + self.module_args = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + sku=dict( + type='str', + choices=['Basic', 'Standard'] + ), + frontend_ip_configurations=dict( + type='list', + elements='dict', + options=frontend_ip_configuration_spec + ), + backend_address_pools=dict( + type='list', + elements='dict', + options=backend_address_pool_spec + ), + probes=dict( + type='list', + elements='dict', + options=probes_spec + ), + inbound_nat_rules=dict( + type='list', + elements='dict', + options=inbound_nat_rule_spec + ), + inbound_nat_pools=dict( + type='list', + elements='dict', + options=inbound_nat_pool_spec + ), + load_balancing_rules=dict( + type='list', + elements='dict', + options=load_balancing_rule_spec + ), + public_ip_address_name=dict( + type='str', + aliases=['public_ip_address', 'public_ip_name', 
'public_ip'] + ), + probe_port=dict( + type='int' + ), + probe_protocol=dict( + type='str', + choices=['Tcp', 'Http', 'Https'] + ), + probe_interval=dict( + type='int', + default=15 + ), + probe_fail_count=dict( + type='int', + default=3 + ), + probe_request_path=dict( + type='str' + ), + protocol=dict( + type='str', + choices=['Tcp', 'Udp'] + ), + load_distribution=dict( + type='str', + choices=['Default', 'SourceIP', 'SourceIPProtocol'] + ), + frontend_port=dict( + type='int' + ), + backend_port=dict( + type='int' + ), + idle_timeout=dict( + type='int', + default=4 + ), + natpool_frontend_port_start=dict( + type='int' + ), + natpool_frontend_port_end=dict( + type='int' + ), + natpool_backend_port=dict( + type='int' + ), + natpool_protocol=dict( + type='str' + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.sku = None + self.frontend_ip_configurations = None + self.backend_address_pools = None + self.probes = None + self.inbound_nat_rules = None + self.inbound_nat_pools = None + self.load_balancing_rules = None + self.public_ip_address_name = None + self.state = None + self.probe_port = None + self.probe_protocol = None + self.probe_interval = None + self.probe_fail_count = None + self.probe_request_path = None + self.protocol = None + self.load_distribution = None + self.frontend_port = None + self.backend_port = None + self.idle_timeout = None + self.natpool_frontend_port_start = None + self.natpool_frontend_port_end = None + self.natpool_backend_port = None + self.natpool_protocol = None + self.tags = None + + self.results = dict(changed=False, state=dict()) + + super(AzureRMLoadBalancer, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True + ) + + def exec_module(self, **kwargs): + """Main module execution method""" + for key in list(self.module_args.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + + resource_group = self.get_resource_group(self.resource_group) + if not 
self.location: + self.location = resource_group.location + + load_balancer = self.get_load_balancer() + + if self.state == 'present': + # compatible parameters + is_compatible_param = not self.frontend_ip_configurations and not self.backend_address_pools and not self.probes and not self.inbound_nat_pools + is_compatible_param = is_compatible_param and not load_balancer # the instance should not be exist + is_compatible_param = is_compatible_param or self.public_ip_address_name or self.probe_protocol or self.natpool_protocol or self.protocol + if is_compatible_param: + self.deprecate('Discrete load balancer config settings are deprecated and will be removed.' + ' Use frontend_ip_configurations, backend_address_pools, probes, inbound_nat_pools lists instead.', version=(2, 9)) + frontend_ip_name = 'frontendip0' + backend_address_pool_name = 'backendaddrp0' + prob_name = 'prob0' + inbound_nat_pool_name = 'inboundnatp0' + lb_rule_name = 'lbr' + self.frontend_ip_configurations = [dict( + name=frontend_ip_name, + public_ip_address=self.public_ip_address_name + )] + self.backend_address_pools = [dict( + name=backend_address_pool_name + )] + self.probes = [dict( + name=prob_name, + port=self.probe_port, + protocol=self.probe_protocol, + interval=self.probe_interval, + fail_count=self.probe_fail_count, + request_path=self.probe_request_path + )] if self.probe_protocol else None + self.inbound_nat_pools = [dict( + name=inbound_nat_pool_name, + frontend_ip_configuration_name=frontend_ip_name, + protocol=self.natpool_protocol, + frontend_port_range_start=self.natpool_frontend_port_start, + frontend_port_range_end=self.natpool_frontend_port_end, + backend_port=self.natpool_backend_port + )] if self.natpool_protocol else None + self.load_balancing_rules = [dict( + name=lb_rule_name, + frontend_ip_configuration=frontend_ip_name, + backend_address_pool=backend_address_pool_name, + probe=prob_name, + protocol=self.protocol, + load_distribution=self.load_distribution, + 
frontend_port=self.frontend_port, + backend_port=self.backend_port, + idle_timeout=self.idle_timeout, + enable_floating_ip=False, + )] if self.protocol else None + + # create new load balancer structure early, so it can be easily compared + if not load_balancer: + frontend_ip_configurations_param = [self.network_models.FrontendIPConfiguration( + name=item.get('name'), + public_ip_address=self.get_public_ip_address_instance(item.get('public_ip_address')) if item.get('public_ip_address') else None, + private_ip_address=item.get('private_ip_address'), + private_ip_allocation_method=item.get('private_ip_allocation_method'), + zones=item.get('zones'), + subnet=self.network_models.Subnet( + id=item.get('subnet'), + private_endpoint_network_policies=None, + private_link_service_network_policies=None + ) if item.get('subnet') else None + ) for item in self.frontend_ip_configurations] if self.frontend_ip_configurations else None + else: + old_front = load_balancer.frontend_ip_configurations + new_front = self.frontend_ip_configurations + frontend_ip_configurations_param = [self.network_models.FrontendIPConfiguration( + name=new_front[index].get('name'), + public_ip_address=self.get_public_ip_address_instance( + new_front[index].get('public_ip_address') + ) if new_front[index].get('public_ip_address') else None, + private_ip_address=new_front[index].get('private_ip_address'), + private_ip_allocation_method=new_front[index].get('private_ip_allocation_method'), + zones=new_front[index].get('zones') if new_front[index].get('zones') else None, + subnet=self.network_models.Subnet( + id=new_front[index].get('subnet'), + private_endpoint_network_policies=None, + private_link_service_network_policies=None + ) if new_front[index].get('subnet') else None + ) for index in range(len(new_front))] if new_front else None + + backend_address_pools_param = [self.network_models.BackendAddressPool( + name=item.get('name') + ) for item in self.backend_address_pools] if 
self.backend_address_pools else None + + probes_param = [self.network_models.Probe( + name=item.get('name'), + port=item.get('port'), + protocol=item.get('protocol'), + interval_in_seconds=item.get('interval'), + request_path=item.get('request_path'), + number_of_probes=item.get('fail_count') + ) for item in self.probes] if self.probes else None + + inbound_nat_pools_param = [self.network_models.InboundNatPool( + name=item.get('name'), + frontend_ip_configuration=self.network_models.SubResource( + id=frontend_ip_configuration_id( + self.subscription_id, + self.resource_group, + self.name, + item.get('frontend_ip_configuration_name'))), + protocol=item.get('protocol'), + frontend_port_range_start=item.get('frontend_port_range_start'), + frontend_port_range_end=item.get('frontend_port_range_end'), + backend_port=item.get('backend_port') + ) for item in self.inbound_nat_pools] if self.inbound_nat_pools else None + + load_balancing_rules_param = [self.network_models.LoadBalancingRule( + name=item.get('name'), + frontend_ip_configuration=self.network_models.SubResource( + id=frontend_ip_configuration_id( + self.subscription_id, + self.resource_group, + self.name, + item.get('frontend_ip_configuration') + ) + ), + backend_address_pool=self.network_models.SubResource( + id=backend_address_pool_id( + self.subscription_id, + self.resource_group, + self.name, + item.get('backend_address_pool') + ) + ), + probe=self.network_models.SubResource( + id=probe_id( + self.subscription_id, + self.resource_group, + self.name, + item.get('probe') + ) + ), + protocol=item.get('protocol'), + load_distribution=item.get('load_distribution'), + frontend_port=item.get('frontend_port'), + backend_port=item.get('backend_port'), + idle_timeout_in_minutes=item.get('idle_timeout'), + enable_floating_ip=item.get('enable_floating_ip'), + disable_outbound_snat=item.get('disable_outbound_snat'), + ) for item in self.load_balancing_rules] if self.load_balancing_rules else None + + 
inbound_nat_rules_param = [self.network_models.InboundNatRule( + name=item.get('name'), + frontend_ip_configuration=self.network_models.SubResource( + id=frontend_ip_configuration_id( + self.subscription_id, + self.resource_group, + self.name, + item.get('frontend_ip_configuration') + ) + ) if item.get('frontend_ip_configuration') else None, + protocol=item.get('protocol'), + frontend_port=item.get('frontend_port'), + backend_port=item.get('backend_port'), + idle_timeout_in_minutes=item.get('idle_timeout'), + enable_tcp_reset=item.get('enable_tcp_reset'), + enable_floating_ip=item.get('enable_floating_ip') + ) for item in self.inbound_nat_rules] if self.inbound_nat_rules else None + + # construct the new instance, if the parameter is none, keep remote one + self.new_load_balancer = self.network_models.LoadBalancer( + sku=self.network_models.LoadBalancerSku(name=self.sku) if self.sku else None, + location=self.location, + tags=self.tags, + frontend_ip_configurations=frontend_ip_configurations_param, + backend_address_pools=backend_address_pools_param, + probes=probes_param, + inbound_nat_pools=inbound_nat_pools_param, + load_balancing_rules=load_balancing_rules_param, + inbound_nat_rules=inbound_nat_rules_param + ) + + self.new_load_balancer = self.assign_protocol(self.new_load_balancer, load_balancer) + + if load_balancer: + self.new_load_balancer = self.object_assign(self.new_load_balancer, load_balancer) + load_balancer_dict = load_balancer.as_dict() + new_dict = self.new_load_balancer.as_dict() + if not default_compare(new_dict, load_balancer_dict, ''): + changed = True + else: + changed = False + else: + changed = True + elif self.state == 'absent' and load_balancer: + changed = True + + self.results['state'] = load_balancer.as_dict() if load_balancer else {} + if 'tags' in self.results['state']: + update_tags, self.results['state']['tags'] = self.update_tags(self.results['state']['tags']) + if update_tags: + changed = True + else: + if self.tags: + changed = 
True + self.results['changed'] = changed + + if self.check_mode: + return self.results + + if self.state == 'present' and changed: + self.results['state'] = self.create_or_update_load_balancer(self.new_load_balancer).as_dict() + elif self.state == 'absent' and changed: + self.delete_load_balancer() + self.results['state'] = None + + return self.results + + def get_public_ip_address_instance(self, id): + """Get a reference to the public ip address resource""" + self.log('Fetching public ip address {0}'.format(id)) + resource_id = format_resource_id(id, self.subscription_id, 'Microsoft.Network', 'publicIPAddresses', self.resource_group) + return self.network_models.PublicIPAddress(id=resource_id) + + def get_load_balancer(self): + """Get a load balancer""" + self.log('Fetching loadbalancer {0}'.format(self.name)) + try: + return self.network_client.load_balancers.get(self.resource_group, self.name) + except ResourceNotFoundError: + return None + + def delete_load_balancer(self): + """Delete a load balancer""" + self.log('Deleting loadbalancer {0}'.format(self.name)) + try: + poller = self.network_client.load_balancers.begin_delete(self.resource_group, self.name) + return self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting loadbalancer {0} - {1}".format(self.name, str(exc))) + + def create_or_update_load_balancer(self, param): + try: + poller = self.network_client.load_balancers.begin_create_or_update(self.resource_group, self.name, param) + new_lb = self.get_poller_result(poller) + return new_lb + except Exception as exc: + self.fail("Error creating or updating load balancer {0} - {1}".format(self.name, str(exc))) + + def object_assign(self, patch, origin): + attribute_map = set(self.network_models.LoadBalancer._attribute_map.keys()) - set(self.network_models.LoadBalancer._validation.keys()) + for key in attribute_map: + if not getattr(patch, key): + setattr(patch, key, getattr(origin, key)) + return patch + + def 
def default_compare(new, old, path):
    """Recursively decide whether desired state ``new`` is satisfied by remote state ``old``.

    Only keys present in ``new`` are compared — parameters the user omitted are
    left to whatever the remote resource has.  Lists are compared
    order-insensitively: lists of dicts are aligned on an ``id`` or ``name``
    key when both sides carry one; scalar lists are sorted before element-wise
    comparison.  ``path`` tracks the location being compared (useful for
    debugging; not otherwise used).

    :return: True when no difference requiring an update was found.
    """
    if isinstance(new, dict):
        if not isinstance(old, dict):
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            return False
        if len(old) == 0:
            return True
        if isinstance(old[0], dict):
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            # Bug fix: only sort when an alignment key exists.  The original
            # always sorted with key=lambda x: x.get(key) even when key was
            # None, which produces all-None sort keys and raises TypeError on
            # Python 3 for lists of dicts lacking both 'id' and 'name'.
            if key:
                new = sorted(new, key=lambda x: x.get(key, None))
                old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*'):
                return False
        return True
    else:
        return new == old


def frontend_ip_configuration_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Generate the id for a frontend ip configuration"""
    return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/frontendIPConfigurations/{3}'.format(
        subscription_id,
        resource_group_name,
        load_balancer_name,
        name
    )


def backend_address_pool_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Generate the id for a backend address pool"""
    return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/backendAddressPools/{3}'.format(
        subscription_id,
        resource_group_name,
        load_balancer_name,
        name
    )


def probe_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Generate the id for a probe"""
    return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/probes/{3}'.format(
        subscription_id,
        resource_group_name,
        load_balancer_name,
        name
    )


def main():
    """Main execution"""
    AzureRMLoadBalancer()


if __name__ == '__main__':
    main()
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Thomas Stringer (@trstringer) +''' + +EXAMPLES = ''' + - name: Get facts for one load balancer + azure_rm_loadbalancer_info: + name: Testing + resource_group: myResourceGroup + + - name: Get facts for all load balancers + azure_rm_loadbalancer_info: + + - name: Get facts for all load balancers in a specific resource group + azure_rm_loadbalancer_info: + resource_group: myResourceGroup + + - name: Get facts by tags + azure_rm_loadbalancer_info: + tags: + - testing +''' + +RETURN = ''' +loadbalancers: + description: + - Gets a list of load balancers. + returned: always + type: list + elements: dict + sample: [ + { + "etag": "1c83ade9-9dee-4027-860a-d5fabacc184f", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Network/loadBalancers/testloadbalancer1", + "location": "centralindia", + "name": "testloadbalancer1", + "properties": { + "backendAddressPools": [], + "frontendIPConfigurations": [ + { + "etag": "1c83ade9-9dee-4027-860a-d5fabacc184f", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/ + providers/Microsoft.Network/loadBalancers/testloadbalancer1/ + frontendIPConfigurations/frontendipconf0", + "name": "frontendipconf0", + "properties": { + "privateIPAllocationMethod": "Dynamic", + "provisioningState": "Succeeded", + "publicIPAddress": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ + myAzureResourceGroup/providers/Microsoft.Network/publicIPAddresses/testpip" + } + }, + "zones": ["1", "2", "3"], + "type": "Microsoft.Network/loadBalancers/frontendIPConfigurations" + } + ], + "inboundNatPools": [], + "inboundNatRules": [], + "loadBalancingRules": [], + "outboundRules": [], + "probes": [], + "provisioningState": "Succeeded", + "resourceGuid": "0b31ab3e-7c55-438d-92a0-5acdc99b5277" + }, + "sku": { + "name": 
"Standard" + }, + "type": "Microsoft.Network/loadBalancers" + } + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.common import AzureHttpError +except Exception: + # handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'LoadBalancer' + + +class AzureRMLoadBalancerInfo(AzureRMModuleBase): + """Utility class to get load balancer facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + loadbalancers=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMLoadBalancerInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_loadbalancer_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_loadbalancer_facts' module has been renamed to 'azure_rm_loadbalancer_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + self.results['loadbalancers'] = ( + self.get_item() if self.name + else self.list_items() + ) + + return self.results + + def get_item(self): + """Get a single load balancer""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.network_client.load_balancers.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] + + return result + + def list_items(self): + """Get all load balancers""" + + self.log('List all load balancers') + + if self.resource_group: + try: + response = 
self.network_client.load_balancers.list(self.resource_group) + except ResourceNotFoundError as exc: + self.fail('Failed to list items in resource group {0} - {1}'.format(self.resource_group, str(exc))) + else: + try: + response = self.network_client.load_balancers.list_all() + except ResourceNotFoundError as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + + return results + + +def main(): + """Main module execution code path""" + + AzureRMLoadBalancerInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_lock.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_lock.py new file mode 100644 index 000000000..45fd38bc6 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_lock.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_lock +version_added: "0.1.2" +short_description: Manage Azure locks +description: + - Create, delete an Azure lock. + - To create or delete management locks, you must have access to Microsoft.Authorization/* or Microsoft.Authorization/locks/* actions. + - Of the built-in roles, only Owner and User Access Administrator are granted those actions. +options: + name: + description: + - Name of the lock. + type: str + required: true + managed_resource_id: + description: + - Manage a lock for the specified resource ID. + - Mutually exclusive with I(resource_group). + - If neither I(managed_resource_id) or I(resource_group) are specified, manage a lock for the current subscription. 
+ - "'/subscriptions/{subscriptionId}' for subscriptions." + - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups." + - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources." + type: str + resource_group: + description: + - Manage a lock for the named resource group. + - Mutually exclusive with I(managed_resource_id). + - If neither I(managed_resource_id) or I(resource_group) are specified, manage a lock for the current subscription. + type: str + notes: + description: + - Notes about the lock. Maximum of 512 characters. + type: str + state: + description: + - State of the lock. + - Use C(present) to create or update a lock and C(absent) to delete a lock. + type: str + default: present + choices: + - absent + - present + level: + description: + - The lock level type. + type: str + choices: + - can_not_delete + - read_only +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a lock for a resource + azure_rm_lock: + managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM + name: myLock + level: read_only + +- name: Create a lock for a resource group + azure_rm_lock: + managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup + name: myLock + level: read_only + +- name: Create a lock for a resource group + azure_rm_lock: + resource_group: myResourceGroup + name: myLock + notes: description_lock + level: read_only + +- name: Create a lock for a subscription + azure_rm_lock: + name: myLock + level: read_only +''' + +RETURN = ''' +id: + description: + - Resource ID of the lock. 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
try:
    from azure.core.exceptions import ResourceNotFoundError
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMLock(AzureRMModuleBase):
    """Create, update or delete an Azure management lock at subscription,
    resource group or resource scope."""

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            resource_group=dict(type='str'),
            managed_resource_id=dict(type='str'),
            notes=dict(type='str'),
            level=dict(type='str', choices=['can_not_delete', 'read_only'])
        )

        self.results = dict(
            changed=False,
            id=None
        )

        # 'level' is only meaningful (and therefore required) when creating a lock.
        required_if = [
            ('state', 'present', ['level'])
        ]

        mutually_exclusive = [['resource_group', 'managed_resource_id']]

        # Populated from module parameters by exec_module().
        self.name = None
        self.state = None
        self.level = None
        self.resource_group = None
        # Bug fix: the original initialized a dead 'self.note' attribute; the
        # module option (and everything exec_module reads) is named 'notes'.
        self.notes = None
        self.managed_resource_id = None

        super(AzureRMLock, self).__init__(self.module_arg_spec,
                                          supports_check_mode=True,
                                          required_if=required_if,
                                          mutually_exclusive=mutually_exclusive,
                                          supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: reconcile the requested lock state and return results."""
        for key in self.module_arg_spec.keys():
            setattr(self, key, kwargs[key])

        changed = False
        # construct scope id
        scope = self.get_scope()
        lock = self.get_lock(scope)
        if self.state == 'present':
            # Map the module's snake_case choice onto the SDK's LockLevel enum.
            lock_level = getattr(self.lock_models.LockLevel, self.level)
            if not lock:
                changed = True
                lock = self.lock_models.ManagementLockObject(level=lock_level, notes=self.notes)
            else:
                if lock.level != lock_level:
                    self.log('Lock level changed')
                    lock.level = lock_level
                    changed = True
                if lock.notes != self.notes:
                    self.log('Lock notes changed')
                    lock.notes = self.notes
                    changed = True

            if not self.check_mode:
                lock = self.create_or_update_lock(scope, lock)
                self.results['id'] = lock.id
        elif lock:
            # state == 'absent' and the lock exists: remove it.
            changed = True
            if not self.check_mode:
                self.delete_lock(scope)
        self.results['changed'] = changed
        return self.results

    def delete_lock(self, scope):
        """Delete the named lock at the given scope."""
        try:
            return self.lock_client.management_locks.delete_by_scope(scope, self.name)
        except Exception as exc:
            self.fail('Error when deleting lock {0} for {1}: {2}'.format(self.name, scope, exc.message))

    def create_or_update_lock(self, scope, lock):
        """Create or update the named lock at the given scope; returns the lock object."""
        try:
            return self.lock_client.management_locks.create_or_update_by_scope(scope, self.name, lock)
        except Exception as exc:
            self.fail('Error when creating or updating lock {0} for {1}: {2}'.format(self.name, scope, exc.message))

    def get_lock(self, scope):
        """Return the existing lock at the given scope, or None when absent."""
        try:
            return self.lock_client.management_locks.get_by_scope(scope, self.name)
        except ResourceNotFoundError as exc:
            # A 404 just means the lock does not exist yet.
            if exc.status_code in [404]:
                return None
            self.fail('Error when getting lock {0} for {1}: {2}'.format(self.name, scope, exc.message))

    def get_scope(self):
        '''
        Get the resource scope of the lock management.
        '/subscriptions/{subscriptionId}' for subscriptions,
        '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
        '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
        '''
        if self.managed_resource_id:
            return self.managed_resource_id
        elif self.resource_group:
            return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
        else:
            return '/subscriptions/{0}'.format(self.subscription_id)


def main():
    AzureRMLock()


if __name__ == '__main__':
    main()
+ - Cannot be set mutual with I(managed_resource_id). + - Query subscription if both I(managed_resource_id) and I(resource_group) not defined. + - Can get all locks with 'child scope' in this resource group, use the I(managed_resource_id) in response for further management. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Get myLock details of myVM + azure_rm_lock_info: + name: myLock + managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM + +- name: List locks of myVM + azure_rm_lock_info: + managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM + +- name: List locks of myResourceGroup + azure_rm_lock_info: + resource_group: myResourceGroup + +- name: List locks of myResourceGroup + azure_rm_lock_info: + managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup + +- name: List locks of mySubscription + azure_rm_lock_info: + +- name: List locks of mySubscription + azure_rm_lock_info: + managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +RETURN = ''' +locks: + description: + - List of locks dicts. + returned: always + type: complex + contains: + id: + description: + - ID of the Lock. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock" + name: + description: + - Name of the lock. + returned: always + type: str + sample: myLock + level: + description: + - Type level of the lock. + returned: always + type: str + sample: can_not_delete + notes: + description: + - Notes of the lock added by creator. 
import json
import re
from ansible.module_utils.common.dict_transformations import _camel_to_snake
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient

try:
    from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
    from msrestazure.azure_exceptions import CloudError
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMLockInfo(AzureRMModuleBase):
    """Gather facts about management locks at subscription, resource group or resource scope."""

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            managed_resource_id=dict(type='str')
        )

        self.results = dict(
            changed=False,
            locks=[]
        )

        mutually_exclusive = [['resource_group', 'managed_resource_id']]

        # Populated from module parameters by exec_module().
        self.name = None
        self.resource_group = None
        self.managed_resource_id = None
        self._mgmt_client = None
        self._query_parameters = {'api-version': '2016-09-01'}
        self._header_parameters = {'Content-Type': 'application/json; charset=utf-8'}

        super(AzureRMLockInfo, self).__init__(self.module_arg_spec,
                                              supports_check_mode=True,
                                              facts_module=True,
                                              mutually_exclusive=mutually_exclusive,
                                              supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: build the locks REST URL, query it and return the parsed locks."""
        if self.module._name == 'azure_rm_lock_facts':
            self.module.deprecate("The 'azure_rm_lock_facts' module has been renamed to 'azure_rm_lock_info'", version=(2.9, ))

        for option in self.module_arg_spec.keys():
            setattr(self, option, kwargs[option])

        self._mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
        changed = False
        # construct scope id
        scope = self.get_scope()
        url = '/{0}/providers/Microsoft.Authorization/locks'.format(scope)
        if self.name:
            url = '{0}/{1}'.format(url, self.name)
        locks = self.list_locks(url)
        # A list response wraps its items in 'value'; a single-lock response is the lock itself.
        found = locks['value'] if 'value' in locks else [locks]
        self.results['locks'] = [self.to_dict(item) for item in found]
        return self.results

    def to_dict(self, lock):
        """Convert one raw lock JSON object into the module's result shape."""
        converted = dict(
            id=lock['id'],
            name=lock['name'],
            level=_camel_to_snake(lock['properties']['level']),
            # Strip the lock suffix off the lock id to recover the locked resource's id.
            managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id'])
        )
        if lock['properties'].get('notes'):
            converted['notes'] = lock['properties']['notes']
        if lock['properties'].get('owners'):
            converted['owners'] = [owner['application_id'] for owner in lock['properties']['owners']]
        return converted

    def list_locks(self, url):
        """Issue a GET against the locks URL and return the decoded JSON body."""
        try:
            response = self._mgmt_client.query(url=url,
                                               method='GET',
                                               query_parameters=self._query_parameters,
                                               header_parameters=self._header_parameters,
                                               body=None,
                                               expected_status_codes=[200],
                                               polling_timeout=None,
                                               polling_interval=None)
            return json.loads(response.text)
        except CloudError as exc:
            self.fail('Error when finding locks {0}: {1}'.format(url, exc.message))

    def get_scope(self):
        '''
        Get the resource scope of the lock management.
        '/subscriptions/{subscriptionId}' for subscriptions,
        '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
        '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
        '''
        if self.managed_resource_id:
            return self.managed_resource_id
        if self.resource_group:
            return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
        return '/subscriptions/{0}'.format(self.subscription_id)


def main():
    AzureRMLockInfo()


if __name__ == '__main__':
    main()
+ intelligence_packs: + description: + - Manage intelligence packs possible for this workspace. + - Enable one pack by setting it to C(true). For example "Backup:true". + - Disable one pack by setting it to C(false). For example "Backup:false". + - Other intelligence packs not list in this property will not be changed. + type: dict + force: + description: + - Deletes the workspace without the recovery option. A workspace that was deleted with this flag cannot be recovered. + default: false + type: bool +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' +- name: Create a workspace with backup enabled + azure_rm_loganalyticsworkspace: + resource_group: myResourceGroup + name: myLogAnalyticsWorkspace + intelligence_pack: + Backup: true +''' + +RETURN = ''' +id: + description: + - Workspace resource path. + type: str + returned: success + example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/m + yLogAnalyticsWorkspace" +location: + description: + - Resource location. + type: str + returned: success + example: eastus +sku: + description: + - The SKU of the workspace. + type: str + returned: success + example: "per_gb2018" +retention_in_days: + description: + - The workspace data retention in days. + - -1 means Unlimited retention for I(sku=unlimited). + - 730 days is the maximum allowed for all other SKUs. + type: int + returned: success + example: 40 +intelligence_packs: + description: + - Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace. + type: list + returned: success + example: ['name': 'CapacityPerformance', 'enabled': true] +management_groups: + description: + - Management groups connected to the workspace. 
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake

try:
    from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
    from msrestazure.tools import parse_resource_id
    from azure.core.exceptions import ResourceNotFoundError
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMLogAnalyticsWorkspace(AzureRMModuleBase):
    """Create, update or delete an Azure Log Analytics workspace."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str'),
            sku=dict(type='str', default='per_gb2018', choices=['free', 'standard', 'premium', 'unlimited', 'per_node', 'per_gb2018', 'standalone']),
            retention_in_days=dict(type='int'),
            intelligence_packs=dict(type='dict'),
            force=dict(type='bool', default=False)
        )

        self.results = dict(
            changed=False,
            id=None
        )

        # Populated from module parameters by exec_module().
        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None
        self.sku = None
        self.retention_in_days = None
        self.intelligence_packs = None
        self.force = None

        super(AzureRMLogAnalyticsWorkspace, self).__init__(self.module_arg_spec, supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Entry point: reconcile the workspace state and return the results dict."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results = dict()
        changed = False

        # Default the location to the resource group's location.
        if not self.location:
            resource_group = self.get_resource_group(self.resource_group)
            self.location = resource_group.location

        # The API expects CamelCase SKU names; 'per_gb2018' maps to the
        # irregular casing 'PerGB2018', all others convert mechanically.
        if self.sku == 'per_gb2018':
            self.sku = 'PerGB2018'
        else:
            self.sku = _snake_to_camel(self.sku)
        workspace = self.get_workspace()
        if not workspace and self.state == 'present':
            # Create a brand new workspace.
            changed = True
            workspace = self.log_analytics_models.Workspace(sku=self.log_analytics_models.WorkspaceSku(name=self.sku),
                                                            retention_in_days=self.retention_in_days,
                                                            location=self.location,
                                                            tags=self.tags)
            if not self.check_mode:
                workspace = self.create_workspace(workspace)
        elif workspace and self.state == 'present':
            # Update path.  NOTE(review): when 'retention_in_days' is omitted
            # this compares the remote value against None and flags a change —
            # confirm whether resetting retention is intended in that case.
            if workspace.retention_in_days != self.retention_in_days:
                changed = True
            results = dict()
            update_tags, results['tags'] = self.update_tags(workspace.tags)
            if update_tags:
                changed = True
            if not self.check_mode and changed:
                workspace = self.log_analytics_models.Workspace(sku=self.log_analytics_models.WorkspaceSku(name=self.sku),
                                                                retention_in_days=self.retention_in_days,
                                                                location=self.location,
                                                                tags=results['tags'])
                workspace = self.create_workspace(workspace)
        elif workspace and self.state == 'absent':
            changed = True
            workspace = None
            if not self.check_mode:
                self.delete_workspace()
        if workspace and workspace.id:
            # Enrich the result with the workspace's related collections.
            self.results = self.to_dict(workspace)
            self.results['intelligence_packs'] = self.list_intelligence_packs()
            self.results['management_groups'] = self.list_management_groups()
            self.results['usages'] = self.list_usages()
            self.results['shared_keys'] = self.get_shared_keys()
        # handle the intelligence pack
        if workspace and workspace.id and self.intelligence_packs:
            intelligence_packs = self.results['intelligence_packs']
            for key in self.intelligence_packs.keys():
                enabled = self.intelligence_packs[key]
                # Match requested pack names case-insensitively against the
                # packs reported by the service; only toggle on a real change.
                for x in intelligence_packs:
                    if x['name'].lower() == key.lower():
                        if x['enabled'] != enabled:
                            changed = True
                            if not self.check_mode:
                                self.change_intelligence(x['name'], enabled)
                            x['enabled'] = enabled
                        break
        self.results['changed'] = changed
        return self.results

    def create_workspace(self, workspace):
        """Create or update the workspace and wait for the long-running operation."""
        try:
            poller = self.log_analytics_client.workspaces.begin_create_or_update(self.resource_group, self.name, workspace)
            return self.get_poller_result(poller)
        except Exception as exc:
            self.fail('Error when creating workspace {0} - {1}'.format(self.name, exc.message or str(exc)))

    def get_workspace(self):
        """Return the workspace object, or None (implicitly) when it does not exist."""
        try:
            return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
        except ResourceNotFoundError:
            # Missing workspace is an expected state, not an error.
            pass

    def delete_workspace(self):
        """Delete the workspace; 'force' skips the soft-delete recovery option."""
        try:
            self.log_analytics_client.workspaces.begin_delete(self.resource_group, self.name, force=self.force)
        except Exception as exc:
            self.fail('Error when deleting workspace {0} - {1}'.format(self.name, exc.message or str(exc)))

    def to_dict(self, workspace):
        """Serialize the workspace, normalizing the SKU name back to snake_case."""
        result = workspace.as_dict()
        result['sku'] = _camel_to_snake(workspace.sku.name)
        return result

    def list_intelligence_packs(self):
        """List the workspace's intelligence packs with their enabled flags."""
        try:
            response = self.log_analytics_client.intelligence_packs.list(self.resource_group, self.name)
            return [x.as_dict() for x in response]
        except Exception as exc:
            self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))

    def change_intelligence(self, key, value):
        """Enable (truthy value) or disable (falsy value) the named intelligence pack."""
        try:
            if value:
                self.log_analytics_client.intelligence_packs.enable(self.resource_group, self.name, key)
            else:
                self.log_analytics_client.intelligence_packs.disable(self.resource_group, self.name, key)
        except Exception as exc:
            self.fail('Error when changing intelligence pack {0} - {1}'.format(key, exc.message or str(exc)))

    def list_management_groups(self):
        """Drain the management-groups iterator into a list of dicts."""
        result = []
        try:
            response = self.log_analytics_client.management_groups.list(self.resource_group, self.name)
            # The iterator signals exhaustion via StopIteration, which ends the loop.
            while True:
                result.append(response.next().as_dict())
        except StopIteration:
            pass
        except Exception as exc:
            self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
        return result

    def list_usages(self):
        """Drain the usage-metrics iterator into a list of dicts."""
        result = []
        try:
            response = self.log_analytics_client.usages.list(self.resource_group, self.name)
            while True:
                result.append(response.next().as_dict())
        except StopIteration:
            pass
        except Exception as exc:
            self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
        return result

    def get_shared_keys(self):
        """Fetch the workspace's shared keys as a dict."""
        try:
            return self.log_analytics_client.shared_keys.get_shared_keys(self.resource_group, self.name).as_dict()
        except Exception as exc:
            self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))


def main():
    AzureRMLogAnalyticsWorkspace()


if __name__ == '__main__':
    main()
+ type: list + elements: str + show_intelligence_packs: + description: + - Show the intelligence packs for a workspace. + - Note this will cost one more network overhead for each workspace, expected slow response. + show_management_groups: + description: + - Show the management groups for a workspace. + - Note this will cost one more network overhead for each workspace, expected slow response. + show_shared_keys: + description: + - Show the shared keys for a workspace. + - Note this will cost one more network overhead for each workspace, expected slow response. + show_usages: + description: + - Show the list of usages for a workspace. + - Note this will cost one more network overhead for each workspace, expected slow response. +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Query a workspace + azure_rm_loganalyticsworkspace_info: + resource_group: myResourceGroup + name: myLogAnalyticsWorkspace + show_intelligence_packs: true + show_management_groups: true + show_shared_keys: true + show_usages: true +''' + +RETURN = ''' +id: + description: + - Workspace resource path. + type: str + returned: success + example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/m + yLogAnalyticsWorkspace" +location: + description: + - Resource location. + type: str + returned: success + example: "eastus" +sku: + description: + - The SKU of the workspace. + type: str + returned: success + example: "per_gb2018" +retention_in_days: + description: + - The workspace data retention in days. + - -1 means Unlimited retention for I(sku=unlimited). + - 730 days is the maximum allowed for all other SKUs. + type: int + returned: success + example: 40 +intelligence_packs: + description: + - Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace. 
+ type: list + returned: success + example: [ {'name': 'CapacityPerformance', 'enabled': true} ] +management_groups: + description: + - Management groups connected to the workspace. + type: dict + returned: success + example: {'value': []} +shared_keys: + description: + - Shared keys for the workspace. + type: dict + returned: success + example: { + 'primarySharedKey': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', + 'secondarySharedKey': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + } +usages: + description: + - Usage metrics for the workspace. + type: dict + returned: success + example: { + 'value': [ + { + 'name': { + 'value': 'DataAnalyzed', + 'localizedValue': 'Data Analyzed' + }, + 'unit': 'Bytes', + 'currentValue': 0, + 'limit': 524288000, + 'nextResetTime': '2017-10-03T00:00:00Z', + 'quotaPeriod': 'P1D' + } + ] + } +''' # NOQA + +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMLogAnalyticsWorkspaceInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str'), + tags=dict(type='list', elements='str'), + show_shared_keys=dict(type='bool'), + show_intelligence_packs=dict(type='bool'), + show_usages=dict(type='bool'), + show_management_groups=dict(type='bool') + ) + + self.results = dict( + changed=False, + workspaces=[] + ) + + self.resource_group = None + self.name = None + self.tags = None + self.show_intelligence_packs = None + self.show_shared_keys = None + self.show_usages = None + self.show_management_groups = None + + super(AzureRMLogAnalyticsWorkspaceInfo, self).__init__(self.module_arg_spec, 
supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_loganalyticsworkspace_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_loganalyticsworkspace_facts' module has been renamed to 'azure_rm_loganalyticsworkspace_info'", + version=(2.9, )) + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + if self.name: + item = self.get_workspace() + response = [item] if item else [] + else: + response = self.list_by_resource_group() + + self.results['workspaces'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)] + return self.results + + def get_workspace(self): + try: + return self.log_analytics_client.workspaces.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + return None + + def list_by_resource_group(self): + try: + return self.log_analytics_client.resource_group.list(self.resource_group) + except Exception: + pass + return [] + + def list_intelligence_packs(self): + try: + response = self.log_analytics_client.intelligence_packs.list(self.resource_group, self.name) + return [x.as_dict() for x in response] + except Exception as exc: + self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc))) + + def list_management_groups(self): + result = [] + try: + response = self.log_analytics_client.management_groups.list(self.resource_group, self.name) + while True: + result.append(response.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when listing management groups {0}'.format(exc.message or str(exc))) + return result + + def list_usages(self): + result = [] + try: + response = self.log_analytics_client.usages.list(self.resource_group, self.name) + while True: + result.append(response.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when listing usages 
{0}'.format(exc.message or str(exc))) + return result + + def get_shared_keys(self): + try: + return self.log_analytics_client.shared_keys.get_shared_keys(self.resource_group, self.name).as_dict() + except Exception as exc: + self.fail('Error when getting shared key {0}'.format(exc.message or str(exc))) + + def to_dict(self, workspace): + result = workspace.as_dict() + result['sku'] = _camel_to_snake(workspace.sku.name) + if self.show_intelligence_packs: + result['intelligence_packs'] = self.list_intelligence_packs() + if self.show_management_groups: + result['management_groups'] = self.list_management_groups() + if self.show_shared_keys: + result['shared_keys'] = self.get_shared_keys() + if self.show_usages: + result['usages'] = self.list_usages() + return result + + +def main(): + AzureRMLogAnalyticsWorkspaceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk.py new file mode 100644 index 000000000..e11965381 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk.py @@ -0,0 +1,656 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Bruno Medina Bolanos Cacho +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_manageddisk + +version_added: "0.1.2" + +short_description: Manage Azure Manage Disks + +description: + - Create, update and delete an Azure Managed Disk. + +notes: + - This module was called M(azure.azcollection.azure_rm_managed_disk) before Ansible 2.8. The usage did not change. + +options: + resource_group: + description: + - Name of a resource group where the managed disk exists or will be created. + required: true + name: + description: + - Name of the managed disk. 
+ required: true + state: + description: + - Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + storage_account_type: + description: + - Type of storage for the managed disk. + - If not specified, the disk is created as C(Standard_LRS). + - C(Standard_LRS) is for Standard HDD. + - C(StandardSSD_LRS) (added in 2.8) is for Standard SSD. + - C(StandardSSD_ZRS) is for Standard SSD Zone-redundant. + - C(Premium_LRS) is for Premium SSD. + - C(Premium_ZRS) is for Premium SSD Zone-redundant. + - C(UltraSSD_LRS) (added in 2.8) is for Ultra SSD, which is only available on select instance types. + - See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about disk types. + choices: + - Standard_LRS + - StandardSSD_LRS + - StandardSSD_ZRS + - Premium_LRS + - Premium_ZRS + - UltraSSD_LRS + create_option: + description: + - C(import) from a VHD file in I(source_uri) and C(copy) from previous managed disk I(source_uri). + choices: + - empty + - import + - copy + storage_account_id: + description: + - The full path to the storage account the image is to be imported from. + - Required when I(create_option=import). + type: str + source_uri: + description: + - URI to a valid VHD file to be used or the resource ID of the managed disk to copy. + aliases: + - source_resource_uri + os_type: + description: + - Type of Operating System. + - Used when I(create_option=copy) or I(create_option=import) and the source is an OS disk. + - If omitted during creation, no value is set. + - If omitted during an update, no change is made. + - Once set, this value cannot be cleared. + choices: + - linux + - windows + disk_size_gb: + description: + - Size in GB of the managed disk to be created. 
+ - If I(create_option=copy) then the value must be greater than or equal to the source's size. + managed_by: + description: + - Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group. + - To detach a disk from a vm, explicitly set to ''. + - If this option is unset, the value will not be changed. + managed_by_extended: + description: + - List of name and resource group of the VMs that have the disk attached. + - I(max_shares) should be set to a value greater than one for disks to allow attaching them to multiple VMs. + type: list + elements: dict + suboptions: + resource_group: + description: + - The resource group of the attache VM. + type: str + name: + description: + - The name of the attache VM. + type: str + max_shares: + description: + - The maximum number of VMs that can attach to the disk at the same time. + - Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + type: int + attach_caching: + description: + - Disk caching policy controlled by VM. Will be used when attached to the VM defined by C(managed_by). + - If this option is different from the current caching policy, the managed disk will be deattached and attached with current caching option again. + choices: + - '' + - read_only + - read_write + zone: + description: + - The Azure managed disk's zone. + - Allowed values are C(1), C(2), C(3) and C(''). + choices: + - '1' + - '2' + - '3' + - '' + lun: + description: + - The logical unit number for data disk. + - This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. 
+ type: int + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Bruno Medina (@brusMX) +''' + +EXAMPLES = ''' + - name: Create managed disk + azure_rm_manageddisk: + name: mymanageddisk + location: eastus + resource_group: myResourceGroup + disk_size_gb: 4 + + - name: Create managed operating system disk from page blob + azure_rm_manageddisk: + name: mymanageddisk + location: eastus2 + resource_group: myResourceGroup + create_option: import + source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd + storage_account_id: /subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/storageaccountname + os_type: windows + storage_account_type: Premium_LRS + + - name: Mount the managed disk to VM + azure_rm_manageddisk: + name: mymanageddisk + location: eastus + resource_group: myResourceGroup + disk_size_gb: 4 + managed_by: testvm001 + attach_caching: read_only + + - name: Mount the managed disk to multiple VMs + azure_rm_manageddisk: + resource_group: myResourceGroup + name: freddisk04 + max_shares: 4 + disk_size_gb: 1024 + storage_account_type: Premium_LRS + managed_by_extended: + - resource_group: myResourceGroup01 + name: testVM01 + - resource_group: myResourceGroup02 + name: testVM02 + zone: 1 + + - name: Unmount the managed disk to VM + azure_rm_manageddisk: + name: mymanageddisk + location: eastus + resource_group: myResourceGroup + managed_by: '' + disk_size_gb: 4 + + - name: Delete managed disk + azure_rm_manageddisk: + name: mymanageddisk + location: eastus + resource_group: myResourceGroup + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the managed disk. + returned: always + type: complex + contains: + id: + description: + - Resource id. + type: str + name: + description: + - Name of the managed disk. + type: str + location: + description: + - Valid Azure location. 
+ type: str + storage_account_type: + description: + - Type of storage for the managed disk. + - See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about this type. + type: str + sample: Standard_LRS + create_option: + description: + - Create option of the disk. + type: str + sample: copy + storage_account_id: + description: + - The full path to the storage account the image is to be imported from + type: str + sample: /subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/ + source_uri: + description: + - URI to a valid VHD file to be used or the resource ID of the managed disk to copy. + type: str + os_type: + description: + - Type of Operating System. + type: str + sample: linux + disk_size_gb: + description: + - Size in GB of the managed disk to be created. + type: str + managed_by: + description: + - Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group. + type: str + max_shares: + description: + - The maximum number of VMs that can attach to the disk at the same time. + - Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + type: int + sample: 3 + managed_by_extended: + description: + - List ID of an existing virtual machine with which the disk is or will be associated. + type: list + sample: ["/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Compute/virtualMachines/testVM"] + tags: + description: + - Tags to assign to the managed disk. + type: dict + sample: { "tag": "value" } +changed: + description: + - Whether or not the resource has changed. 
+ returned: always + type: bool +''' + +import re + + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +try: + from concurrent.futures import ThreadPoolExecutor + import multiprocessing + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +managed_by_extended_spec = dict( + resource_group=dict(type='str'), + name=dict(type='str') +) + + +# duplicated in azure_rm_manageddisk_facts +def managed_disk_to_dict(managed_disk): + create_data = managed_disk.creation_data + return dict( + id=managed_disk.id, + name=managed_disk.name, + location=managed_disk.location, + tags=managed_disk.tags, + create_option=create_data.create_option.lower(), + source_uri=create_data.source_uri or create_data.source_resource_id, + disk_size_gb=managed_disk.disk_size_gb, + os_type=managed_disk.os_type.lower() if managed_disk.os_type else None, + storage_account_type=managed_disk.sku.name if managed_disk.sku else None, + managed_by=managed_disk.managed_by, + max_shares=managed_disk.max_shares, + managed_by_extended=managed_disk.managed_by_extended, + zone=managed_disk.zones[0] if managed_disk.zones and len(managed_disk.zones) > 0 else '' + ) + + +class AzureRMManagedDisk(AzureRMModuleBase): + """Configuration class for an Azure RM Managed Disk resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + storage_account_type=dict( + type='str', + choices=['Standard_LRS', 'StandardSSD_LRS', 'StandardSSD_ZRS', 'Premium_LRS', 'Premium_ZRS', 'UltraSSD_LRS'] + ), + create_option=dict( + type='str', + choices=['empty', 'import', 'copy'] + ), + storage_account_id=dict( + type='str' + ), + 
source_uri=dict( + type='str', + aliases=['source_resource_uri'] + ), + os_type=dict( + type='str', + choices=['linux', 'windows'] + ), + disk_size_gb=dict( + type='int' + ), + managed_by=dict( + type='str' + ), + zone=dict( + type='str', + choices=['', '1', '2', '3'] + ), + attach_caching=dict( + type='str', + choices=['', 'read_only', 'read_write'] + ), + lun=dict( + type='int' + ), + max_shares=dict( + type='int' + ), + managed_by_extended=dict( + type='list', + elements='dict', + options=managed_by_extended_spec + ) + ) + required_if = [ + ('create_option', 'import', ['source_uri', 'storage_account_id']), + ('create_option', 'copy', ['source_uri']), + ('create_option', 'empty', ['disk_size_gb']) + ] + self.results = dict( + changed=False, + state=dict()) + + self.resource_group = None + self.name = None + self.location = None + self.storage_account_type = None + self.create_option = None + self.storage_account_id = None + self.source_uri = None + self.os_type = None + self.disk_size_gb = None + self.tags = None + self.zone = None + self.managed_by = None + self.attach_caching = None + self.lun = None + self.max_shares = None + self.managed_by_extended = None + + mutually_exclusive = [['managed_by_extended', 'managed_by']] + + super(AzureRMManagedDisk, self).__init__( + derived_arg_spec=self.module_arg_spec, + required_if=required_if, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + result = None + changed = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + disk_instance = self.get_managed_disk() + if disk_instance is not None: + if self.create_option is None: + self.create_option = disk_instance.get('create_option') + if self.source_uri is None: + 
self.source_uri = disk_instance.get('source_uri') + if self.disk_size_gb is None: + self.disk_size_gb = disk_instance.get('disk_size_gb') + if self.os_type is None: + self.os_type = disk_instance.get('os_type') + if self.zone is None: + self.zone = disk_instance.get('zone') + result = disk_instance + + # need create or update + if self.state == 'present': + parameter = self.generate_managed_disk_property() + if not disk_instance or self.is_different(disk_instance, parameter): + changed = True + if not self.check_mode: + result = self.create_or_update_managed_disk(parameter) + else: + result = True + + # Mount the disk to multiple VM + if self.managed_by_extended: + if not self.check_mode: + cpu_count = multiprocessing.cpu_count() + executor = ThreadPoolExecutor(max_workers=cpu_count) + task_result = [] + for vm_item in self.managed_by_extended: + vm_name_id = self.compute_client.virtual_machines.get(vm_item['resource_group'], vm_item['name']) + if result['managed_by_extended'] is None or vm_name_id.id not in result['managed_by_extended']: + changed = True + feature = executor.submit(self.attach, vm_item['resource_group'], vm_item['name'], result) + task_result.append({'task': feature, 'vm_name': vm_item['name'], 'resource_group': vm_item['resource_group']}) + fail_attach_VM = [] + for task_item in task_result: + if task_item['task'].result() is not None: + task_item['error_msg'] = task_item['task'].result() + task_item.pop('task') + fail_attach_VM.append(task_item) + if len(fail_attach_VM) > 0: + self.fail("Disk mount failure, VM and Error message information: {0}".format(fail_attach_VM)) + + result = self.get_managed_disk() + + # unmount from the old virtual machine and mount to the new virtual machine + if self.managed_by or self.managed_by == '': + vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None + vm_name = vm_name or '' + if self.managed_by != vm_name or self.is_attach_caching_option_different(vm_name, 
result): + changed = True + if not self.check_mode: + if vm_name: + self.detach(self.resource_group, vm_name, result) + if self.managed_by: + self.attach(self.resource_group, self.managed_by, result) + result = self.get_managed_disk() + + if self.state == 'absent' and disk_instance: + changed = True + if not self.check_mode: + self.delete_managed_disk() + result = True + + self.results['changed'] = changed + self.results['state'] = result + return self.results + + def attach(self, resource_group, vm_name, disk): + vm = self._get_vm(resource_group, vm_name) + # find the lun + if self.lun: + lun = self.lun + else: + luns = ([d.lun for d in vm.storage_profile.data_disks] + if vm.storage_profile.data_disks else []) + lun = 0 + while True: + if lun not in luns: + break + lun = lun + 1 + for item in vm.storage_profile.data_disks: + if item.name == self.name: + lun = item.lun + + # prepare the data disk + params = self.compute_models.ManagedDiskParameters(id=disk.get('id'), storage_account_type=disk.get('storage_account_type')) + caching_options = self.compute_models.CachingTypes[self.attach_caching] if self.attach_caching and self.attach_caching != '' else None + # pylint: disable=missing-kwoa + data_disk = self.compute_models.DataDisk(lun=lun, + create_option=self.compute_models.DiskCreateOptionTypes.attach, + managed_disk=params, + caching=caching_options) + vm.storage_profile.data_disks.append(data_disk) + return self._update_vm(resource_group, vm_name, vm) + + def detach(self, resource_group, vm_name, disk): + vm = self._get_vm(resource_group, vm_name) + leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk.get('name').lower()] + if len(vm.storage_profile.data_disks) == len(leftovers): + self.fail("No disk with the name '{0}' was found".format(disk.get('name'))) + vm.storage_profile.data_disks = leftovers + self._update_vm(resource_group, vm_name, vm) + + def _update_vm(self, resource_group, name, params): + try: + poller = 
self.compute_client.virtual_machines.begin_create_or_update(resource_group, name, params) + self.get_poller_result(poller) + except Exception as exc: + if self.managed_by_extended: + return exc + else: + self.fail("Error updating virtual machine {0} - {1}".format(name, str(exc))) + + def _get_vm(self, resource_group, name): + try: + return self.compute_client.virtual_machines.get(resource_group, name, expand='instanceview') + except Exception as exc: + self.fail("Error getting virtual machine {0} - {1}".format(name, str(exc))) + + def generate_managed_disk_property(self): + # TODO: Add support for EncryptionSettings, DiskIOPSReadWrite, DiskMBpsReadWrite + disk_params = {} + creation_data = {} + disk_params['location'] = self.location + disk_params['tags'] = self.tags + if self.zone: + disk_params['zones'] = [self.zone] + if self.storage_account_type: + storage_account_type = self.compute_models.DiskSku(name=self.storage_account_type) + disk_params['sku'] = storage_account_type + disk_params['disk_size_gb'] = self.disk_size_gb + creation_data['create_option'] = self.compute_models.DiskCreateOption.empty + if self.create_option == 'import': + creation_data['create_option'] = self.compute_models.DiskCreateOption.import_enum + creation_data['source_uri'] = self.source_uri + creation_data['source_account_id'] = self.storage_account_id + elif self.create_option == 'copy': + creation_data['create_option'] = self.compute_models.DiskCreateOption.copy + creation_data['source_resource_id'] = self.source_uri + if self.os_type: + disk_params['os_type'] = self.compute_models.OperatingSystemTypes(self.os_type.capitalize()) + else: + disk_params['os_type'] = None + if self.max_shares: + disk_params['max_shares'] = self.max_shares + disk_params['creation_data'] = creation_data + return disk_params + + def create_or_update_managed_disk(self, parameter): + try: + poller = self.compute_client.disks.begin_create_or_update(self.resource_group, + self.name, + parameter) + aux = 
self.get_poller_result(poller) + return managed_disk_to_dict(aux) + except Exception as e: + self.fail("Error creating the managed disk: {0}".format(str(e))) + + # This method accounts for the difference in structure between the + # Azure retrieved disk and the parameters for the new disk to be created. + def is_different(self, found_disk, new_disk): + resp = False + if new_disk.get('disk_size_gb'): + if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']: + resp = True + if new_disk.get('os_type'): + if found_disk['os_type'] is None or not self.compute_models.OperatingSystemTypes(found_disk['os_type'].capitalize()) == new_disk['os_type']: + resp = True + if new_disk.get('sku'): + if not found_disk['storage_account_type'] == new_disk['sku'].name: + resp = True + # Check how to implement tags + if new_disk.get('tags') is not None: + if not found_disk['tags'] == new_disk['tags']: + resp = True + if self.zone is not None: + if not found_disk['zone'] == self.zone: + resp = True + if self.max_shares is not None: + if not found_disk['max_shares'] == self.max_shares: + resp = True + return resp + + def delete_managed_disk(self): + try: + poller = self.compute_client.disks.begin_delete(self.resource_group, + self.name) + return self.get_poller_result(poller) + except Exception as e: + self.fail("Error deleting the managed disk: {0}".format(str(e))) + + def get_managed_disk(self): + try: + resp = self.compute_client.disks.get( + self.resource_group, + self.name) + return managed_disk_to_dict(resp) + except ResourceNotFoundError: + self.log('Did not find managed disk') + + def is_attach_caching_option_different(self, vm_name, disk): + resp = False + if vm_name: + vm = self._get_vm(self.resource_group, vm_name) + correspondence = next((d for d in vm.storage_profile.data_disks if d.name.lower() == disk.get('name').lower()), None) + caching_options = self.compute_models.CachingTypes[self.attach_caching] if self.attach_caching and self.attach_caching != '' else None + if 
correspondence and correspondence.caching != caching_options: + resp = True + if correspondence.caching == 'none' and (self.attach_caching == '' or self.attach_caching is None): + resp = False + return resp + + +def main(): + """Main execution""" + AzureRMManagedDisk() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk_info.py new file mode 100644 index 000000000..01656929c --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_manageddisk_info.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: (c) 2020, Paul Aiton <@paultaiton> +# Copyright: (c) 2016, Bruno Medina Bolanos Cacho +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = r''' +--- +module: azure_rm_manageddisk_info + +version_added: "0.1.2" + +short_description: Get managed disk facts + +description: + - Get facts for a specific managed disk or all managed disks. + +notes: + - This module was called M(azure.azcollection.azure_rm_managed_disk_facts) before Ansible 2.8. The usage did not change. + +options: + name: + description: + - Limit results to a specific managed disk. + type: str + resource_group: + description: + - Limit results to a specific resource group. + - Required if I(name) is set + type: str + tags: + description: + - Limit results by providing a list of tags. + - Format tags as 'key' or 'key:value'. + type: list + elements: str + managed_by: + description: + - Limit results to disks managed by the given VM fqid. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Bruno Medina (@brusMX) + - Paul Aiton (@paultaiton) +''' + +EXAMPLES = r''' +- name: Get facts for one managed disk + azure_rm_manageddisk_info: + name: Testing + resource_group: myResourceGroup + +- name: Get facts for all managed disks + azure_rm_manageddisk_info: + +- name: Get facts for all managed disks managed by a specific vm + azure_rm_manageddisk_info: + managed_by: '/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/rgName/Microsoft.Compute/virtualMachines/vmName' + +- name: Get facts by tags + azure_rm_manageddisk_info: + tags: + - testing +''' + +RETURN = r''' +azure_managed_disk: + description: + - List of managed disk dicts. + returned: always + type: list + contains: + id: + description: + - Resource id. + type: str + name: + description: + - Name of the managed disk. + type: str + location: + description: + - Valid Azure location. + type: str + storage_account_type: + description: + - Type of storage for the managed disk. + - See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about this type. + type: str + sample: Standard_LRS + create_option: + description: + - Create option of the disk. + type: str + sample: copy + source_uri: + description: + - URI to a valid VHD file to be used or the resource ID of the managed disk to copy. + type: str + os_type: + description: + - Type of Operating System. + choices: + - linux + - windows + type: str + disk_size_gb: + description: + - Size in GB of the managed disk to be created. + type: str + managed_by: + description: + - Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group. 
class AzureRMManagedDiskInfo(AzureRMModuleBase):
    """Facts module implementation that collects Azure managed disk information.

    Results are placed under results['ansible_info']['azure_managed_disk'] as a
    list of plain dicts, optionally narrowed by the managed_by and tags parameters.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(type='str'),
            name=dict(type='str'),
            tags=dict(type='list', elements='str'),
            managed_by=dict(type='str')
        )

        self.results = dict(
            ansible_info=dict(
                azure_managed_disk=[]
            )
        )

        self.resource_group = None
        self.name = None
        self.tags = None
        self.managed_by = None

        super(AzureRMManagedDiskInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     facts_module=True,
                                                     supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: choose the narrowest lookup strategy for the given parameters."""
        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])

        if self.name and not self.resource_group:
            self.fail('Parameter Error: name requires that resource_group also be set.')

        if self.name:
            disks = self.get_disk()
        elif self.resource_group:
            disks = self.list_disks_by_resource_group()
        else:
            disks = self.list_disks()

        self.results['ansible_info']['azure_managed_disk'] = disks
        return self.results

    def _select_and_serialize(self, disks):
        """Apply the optional managed_by/tags filters, then serialize each disk to a dict."""
        selected = list(disks)
        if self.managed_by:
            selected = [d for d in selected if d.managed_by == self.managed_by]
        if self.tags:
            selected = [d for d in selected if self.has_tags(d.tags, self.tags)]
        return [self.managed_disk_to_dict(d) for d in selected]

    def get_disk(self):
        """Look up a single disk by resource group and name; a miss yields an empty list."""
        try:
            found = self.compute_client.disks.get(self.resource_group,
                                                  self.name)
            return self._select_and_serialize([found])
        except ResourceNotFoundError:
            self.log('Could not find disk {0} in resource group {1}'.format(self.name, self.resource_group))
            return []

    def list_disks(self):
        """Return every managed disk visible in the subscription."""
        try:
            return self._select_and_serialize(self.compute_client.disks.list())
        except ResourceNotFoundError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))
        return []

    def list_disks_by_resource_group(self):
        """Return every managed disk inside one resource group."""
        try:
            listing = self.compute_client.disks.list_by_resource_group(resource_group_name=self.resource_group)
            return self._select_and_serialize(listing)
        except ResourceNotFoundError as exc:
            self.fail('Failed to list items by resource group - {0}'.format(str(exc)))
        return []

    def managed_disk_to_dict(self, managed_disk):
        """Flatten an SDK disk model into the dict shape documented in RETURN."""
        creation = managed_disk.creation_data
        sku = managed_disk.sku
        return dict(
            id=managed_disk.id,
            name=managed_disk.name,
            location=managed_disk.location,
            tags=managed_disk.tags,
            create_option=creation.create_option.lower(),
            source_uri=creation.source_uri or creation.source_resource_id,
            disk_size_gb=managed_disk.disk_size_gb,
            os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
            storage_account_type=sku.name if sku else None,
            managed_by=managed_disk.managed_by,
            max_shares=managed_disk.max_shares,
            managed_by_extended=managed_disk.managed_by_extended,
            # disks without zone placement are reported as an empty string
            zone=(managed_disk.zones or [''])[0]
        )


def main():
    """Main module execution code path"""
    AzureRMManagedDiskInfo()


if __name__ == '__main__':
    main()
+ - For example /providers/Microsoft.Management/managementGroups. + type: str + properties: + description: + - The properties of the management group. + type: dict + suboptions: + tenant_id: + description: + - The AAD Tenant ID associated with the management group. + - For example 00000000-0000-0000-0000-000000000000. + type: str + display_name: + description: + - The friendly name of the management group. + type: str + parent_id: + description: + - The parent of the management group. + type: str + state: + description: + - Assert the state of the ManagementGroup. + - Use C(present) to create or update an ManagementGroup and C(absent) to delete it. + default: present + choices: + - absent + - present + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create new management group + azure_rm_managementgroup: + group_id: test + type: /providers/Microsoft.Management/managementGroups/ + name: test + +- name: create management group + azure_rm_managementgroup: + group_id: ChildGroup + type: /providers/Microsoft.Management/managementGroups/ + name: ChildGroup + properties: + tenant_id: 20000000-0000-0000-0000-000000000000 + display_name: ChildGroup + parent_id: /providers/Microsoft.Management/managementGroups/RootGroup + +- name: delete management group + azure_rm_managementgroup: + group_id: ChildGroup + state: absent + +''' + +RETURN = ''' +id: + description: + - The fully qualified ID for the management group. + - For example /providers/Microsoft.Management/managementGroups/0000000-0000-0000-0000-000000000000. + returned: always + type: str +type: + description: + - The type of the resource. + - For example /providers/Microsoft.Management/managementGroups. + returned: always + type: str +name: + description: + - The name of the management group. + - For example 00000000-0000-0000-0000-000000000000. 
+ returned: always + type: str +properties: + description: + - The properties of the management group. + returned: always + type: complex + contains: + tenant_id: + description: + - The AAD Tenant ID associated with the management group. + - For example 00000000-0000-0000-0000-000000000000. + returned: always + type: str + display_name: + description: + - The friendly name of the management group. + returned: always + type: str + roles: + description: + - The role definitions associated with the management group. + returned: always + type: str + details: + description: + - The details of the management group. + returned: always + type: complex + contains: + version: + description: + - The version number of the object. + returned: always + type: str + updated_time: + description: + - The date and time when this object was last updated. + returned: always + type: str + updated_by: + description: + - The identity of the principal or process that updated the object. + returned: always + type: str + parent: + description: + - The parent of the management group. + returned: always + type: complex + contains: + id: + description: + - The fully qualified ID for the parent management group. + - For example /providers/Microsoft.Management/managementGroups/0000000-0000-0000-0000-000000000000. + returned: always + type: str + name: + description: + - The name of the parent management group + returned: always + type: str + display_name: + description: + - The friendly name of the parent management group. + returned: always + type: str + children: + description: + - The list of children. + returned: always + type: complex + contains: + type: + description: + - The fully qualified resource type which includes provider namespace. + - For example /providers/Microsoft.Management/managementGroups. + returned: always + type: str + id: + description: + - The fully qualified ID for the child resource (management group or subscription). 
class Actions:
    """Enumeration of the possible reconciliation actions for exec_module."""
    NoAction, Create, Update, Delete = range(4)


class AzureRMManagementGroups(AzureRMModuleBaseExt):
    """Create, update or delete an Azure Management Group via the generic REST client.

    The module compares the requested state (built from module parameters via
    disposition mappings) against the live resource and issues raw PUT/GET/DELETE
    calls to the Management Groups REST endpoint.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            group_id=dict(type='str', updatable=False, required=True),
            name=dict(type='str', updatable=False),
            id=dict(type='str'),
            type=dict(type='str'),
            properties=dict(
                type='dict',
                # disposition strings map the flat module parameters onto the
                # nested JSON body expected by the REST API
                disposition="/",
                options=dict(
                    tenant_id=dict(type='str', disposition="tenantId"),
                    display_name=dict(type='str', disposition="displayName"),
                    parent_id=dict(type='str', disposition="details/parent/id")
                )
            ),
            state=dict(type='str', default='present', choices=['present', 'absent']),
        )

        self.group_id = None
        self.state = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.url = None
        # acceptable HTTP status codes for the REST calls
        self.status_code = [200, 201, 202]
        self.to_do = Actions.NoAction
        # request body assembled from parameters that have no matching attribute
        self.body = {}
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2018-03-01-preview'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        super(AzureRMManagementGroups, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                      supports_check_mode=True,
                                                      supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module logic: decide on create/update/delete/no-op and apply it."""
        # Parameters with a matching attribute configure the module itself;
        # everything else becomes part of the REST request body.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        # expand disposition mappings into the nested JSON body
        self.inflate_parameters(self.module_arg_spec, self.body, 0)

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        self.url = ('/providers' +
                    '/Microsoft.Management' +
                    '/managementGroups' +
                    '/{{ management_group_name }}')
        self.url = self.url.replace('{{ management_group_name }}', self.group_id)

        old_response = self.get_resource()

        if not old_response:
            self.log("ManagementGroup instance doesn't exist")

            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log('ManagementGroup instance already exists')

            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                # compare requested body with the live resource to detect drift
                modifiers = {}
                self.results['compare'] = []
                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
                self.results['modifiers'] = modifiers

                if not self.default_compare(modifiers, self.body, old_response, '', self.results):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log('Need to Create / Update the ManagementGroup instance')

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_resource()

            self.results['body'] = self.body
            # if not old_response:
            self.results['changed'] = True
            # else:
            #     self.results['changed'] = old_response.__ne__(response)
            self.log('Creation / Update done')
        elif self.to_do == Actions.Delete:
            self.log('ManagementGroup instance deleted')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_resource()

            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_resource():
                time.sleep(20)
        else:
            self.log('ManagementGroup instance unchanged')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            self.results["type"] = response["type"]
            self.results["name"] = response["name"]
            self.results["properties"] = response["properties"]

        return self.results

    def create_update_resource(self):
        """PUT the assembled body to the management group URL and return the parsed reply."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'PUT',
                                              self.query_parameters,
                                              self.header_parameters,
                                              self.body,
                                              self.status_code,
                                              600,
                                              30)
        except Exception as exc:
            self.log('Error attempting to create the ManagementGroup instance.')
            self.fail('Error creating the ManagementGroup instance: {0}'.format(str(exc)))

        try:
            response = json.loads(response.text)
        except Exception:
            # non-JSON reply: preserve the raw text instead of failing
            response = {'text': response.text}
            pass

        return response

    def delete_resource(self):
        """DELETE the management group; failures abort the module via fail()."""
        try:
            response = self.mgmt_client.query(self.url,
                                              'DELETE',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
        except Exception as e:
            self.log('Error attempting to delete the ManagementGroup instance.')
            self.fail('Error deleting the ManagementGroup instance: {0}'.format(str(e)))

        return True

    def get_resource(self):
        """GET the management group; return the parsed dict, or False when absent."""
        found = False
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            found = True
            response = json.loads(response.text)
            self.log("Response : {0}".format(response))
        except Exception as e:
            # any failure (including 404) is treated as "not found"
            self.log('Did not find the ManagementGroup instance. msg: {0}'.format(e))
        if found is True:
            return response

        return False


def main():
    """Main module execution code path."""
    AzureRMManagementGroups()


if __name__ == '__main__':
    main()
+ type: bool + default: False + recurse: + description: + - By default, c(False), only the direct children are returned if I(children) is c(True). + - If c(True), then all descendants of the heirarchy are returned. + - Option only matters when I(children) is c(True), and will otherwise be silently ignored. + type: bool + default: False + +notes: + - azure_rm_managementgroup_info - The roles assigned to the principal executing the playbook will determine what is + a root management_group. You may also be able to request the details of a parent management group, but unable to + fetch that group. It is highly recommended that if I(children) is set c(True) that specific management groups are + requested since a list of all groups will require an additional Azure API call for each returned group. + +seealso: + - module: azure_rm_subscription_info + description: module to look up more in depth information on subscriptions; for example tags. + - module: azure_rm_roleassignment_info + description: module to look up RBAC role assignments, which can use management group id as scope. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Paul Aiton (@paultaiton) +''' + +EXAMPLES = ''' +- name: Get facts for all root management groups for authenticated principal + azure_rm_managementgroup_info: + +- name: Get facts for one management group by id with direct children + azure_rm_managementgroup_info: + id: /providers/Microsoft.Management/managementGroups/contoso-group + children: True + +- name: Get facts for one management group by name with all children, flattened into top list + azure_rm_managementgroup_info: + name: "contoso-group" + children: True + recurse: True + flatten: True +''' + +RETURN = ''' +management_groups: + description: + - List of Management Group dicts. + returned: always + type: list + contains: + display_name: + description: Management Group display name. 
+ returned: always + type: str + sample: "My Management Group" + id: + description: Management Group fully qualified id. + returned: always + type: str + sample: "/providers/Microsoft.Management/managementGroups/group-name" + name: + description: Management Group display name. + returned: always + type: str + sample: group-name + tenant_id: + description: Management Group tenant id + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + type: + description: Management Group type + returned: always + type: str + sample: "/providers/Microsoft.Management/managementGroups" + children: + description: Child management groups or subscriptions. + returned: if I(children) is c(True) + type: list + sample: Nested list of children. Same as top groups, but without tenant_id. +subscriptions: + description: + - List of subscription objects. + returned: if I(children) and I(flatten) are both c(True) + type: list + contains: + display_name: + description: subscription display name. + returned: always + type: str + sample: "some-subscription-name" + id: + description: subscription fully qualified id. + returned: always + type: str + sample: "/subscriptions/00000000-0000-0000-0000-feedc0ffee000000" + subscription_id: + description: subscription guid. 
class AzureRMManagementGroupInfo(AzureRMModuleBase):
    """Facts module: fetch one management group (by name or id) or list root groups,
    optionally expanding children and flattening the hierarchy into top-level lists."""

    def __init__(self):
        self.module_arg_spec = dict(
            children=dict(type='bool', default=False),
            flatten=dict(type='bool', default=False),
            id=dict(type='str'),
            name=dict(type='str', aliases=['management_group_name']),
            recurse=dict(type='bool', default=False)
        )

        self.results = dict(
            changed=False,
            management_groups=[]
        )

        self.children = None
        self.flatten = None
        self.id = None
        self.name = None
        self.recurse = None

        mutually_exclusive = [['name', 'id']]

        super(AzureRMManagementGroupInfo, self).__init__(self.module_arg_spec,
                                                         supports_check_mode=True,
                                                         supports_tags=False,
                                                         mutually_exclusive=mutually_exclusive,
                                                         facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: fetch a single group or list all, then optionally flatten."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        response = []

        if self.name or self.id:
            # NOTE(review): if get_item() fails, to_dict([]) yields [], so this
            # produces [[]] rather than an empty list — confirm callers tolerate it.
            response = [self.get_item()]
        else:
            response = self.list_items()

        if self.flatten and self.children:
            # copy every descendant group/subscription up to the top-level lists
            self.results['subscriptions'] = []
            for group in response:
                new_groups = []
                new_subscriptions = []
                self.results['management_groups'].append(group)
                new_groups, new_subscriptions = self.flatten_group(group)
                self.results['management_groups'] += new_groups
                self.results['subscriptions'] += new_subscriptions
        else:
            self.results['management_groups'] = response

        return self.results

    def get_item(self, mg_name=None):
        """Fetch one management group by bare name, serialized via to_dict()."""
        if not mg_name:
            # The parameter to SDK's management_groups.get(group_id) is not correct,
            # it only works with a bare name value, and not the fqid.
            if self.id and not self.name:
                mg_name = self.id.split('/')[-1]
            else:
                mg_name = self.name

        expand = 'children' if self.children else None
        try:
            response = self.management_groups_client.management_groups.get(group_id=mg_name,
                                                                           expand=expand,
                                                                           recurse=self.recurse)
        except Exception as e:
            self.log('No Management group {0} found. msg: {1}'.format(mg_name, e))
            response = []

        return self.to_dict(response)

    def list_items(self):
        """List all root management groups visible to the principal."""
        self.log('List all management groups.')

        results = []
        response = []

        try:
            response = self.management_groups_client.management_groups.list()
        except Exception as e:
            self.log('No Management groups found.msg: {0}'.format(e))
            pass  # default to response of an empty list

        if self.children:
            # list method cannot return children, so we must iterate over root management groups to
            # get each one individually.
            results = [self.get_item(mg_name=item.name) for item in response]
        else:
            results = [self.to_dict(item) for item in response]

        return results

    def to_dict(self, azure_object):
        """Serialize a management group or subscription SDK object to a plain dict.

        Returns an empty list (not a dict) for falsy input — callers rely on this.
        """
        if not azure_object:
            return []
        if azure_object.type == '/providers/Microsoft.Management/managementGroups':
            return_dict = dict(
                display_name=azure_object.display_name,
                id=azure_object.id,
                name=azure_object.name,
                type=azure_object.type
            )

            # If group has no children, then property will be set to None type.
            # We want an empty list so that it can be used in loops without issue.
            if self.children and azure_object.as_dict().get('children'):
                return_dict['children'] = [self.to_dict(item) for item in azure_object.children]
            elif self.children:
                return_dict['children'] = []

            if azure_object.as_dict().get('details', {}).get('parent'):
                parent_dict = azure_object.as_dict().get('details', {}).get('parent')
                return_dict['parent'] = dict(
                    display_name=parent_dict.get('display_name'),
                    id=parent_dict.get('id'),
                    name=parent_dict.get('name')
                )

        elif azure_object.type == '/subscriptions':
            return_dict = dict(
                display_name=azure_object.display_name,
                id=azure_object.id,
                subscription_id=azure_object.name,
                type=azure_object.type
            )
        else:
            # In theory if the Azure API is updated to include another child type of management groups,
            # the code here will prevent an exception. But there should be logic added in an update to take
            # care of a new child type of management groups.
            return_dict = dict(
                state="This is an unknown and unexpected object. " +
                      "You should report this as a bug to the ansible-collection/azcollection " +
                      "project on github. Please include the object type in your issue report, " +
                      "and @ the authors of this module. ",
                type=azure_object.as_dict().get('type', None)
            )

        if azure_object.as_dict().get('tenant_id'):
            return_dict['tenant_id'] = azure_object.tenant_id

        return return_dict

    def flatten_group(self, management_group):
        """Recursively collect all descendant groups and subscriptions of a group dict.

        :return: tuple (management_group_list, subscription_list)
        """
        management_group_list = []
        subscription_list = []
        if management_group.get('children'):
            for child in management_group.get('children', []):
                if child.get('type') == '/providers/Microsoft.Management/managementGroups':
                    management_group_list.append(child)
                    new_groups, new_subscriptions = self.flatten_group(child)
                    management_group_list += new_groups
                    subscription_list += new_subscriptions
                elif child.get('type') == '/subscriptions':
                    subscription_list.append(child)
        return management_group_list, subscription_list


def main():
    """Main module execution code path."""
    AzureRMManagementGroupInfo()


if __name__ == '__main__':
    main()
class Actions:
    """Enumeration of the possible reconciliation actions for exec_module."""
    NoAction, Create, Update, Delete = range(4)


class AzureRMMariaDbConfiguration(AzureRMModuleBase):
    """Set or reset a single MariaDB server configuration value.

    state=present writes the value with source 'user-override'; state=absent
    resets the setting back to 'system-default'.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            value=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.server_name = None
        self.name = None
        self.value = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                          supports_check_mode=True,
                                                          supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module logic: decide between update, reset-to-default, and no-op."""

        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        old_response = None
        response = None

        old_response = self.get_configuration()

        if not old_response:
            self.log("Configuration instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Configuration instance already exists")
            # only a user-overridden setting needs an explicit reset on absent
            if self.state == 'absent' and old_response['source'] == 'user-override':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Configuration instance has to be deleted or may be updated")
                if self.value != old_response.get('value'):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Configuration instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_configuration()

            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Configuration instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_configuration()
        else:
            self.log("Configuration instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_configuration(self):
        """Write the configuration value with source 'user-override' and wait for the LRO."""
        self.log("Creating / Updating the Configuration instance {0}".format(self.name))

        try:
            response = self.mariadb_client.configurations.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                 server_name=self.server_name,
                                                                                 configuration_name=self.name,
                                                                                 parameters={'value': self.value, 'source': 'user-override'})
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except Exception as exc:
            self.log('Error attempting to create the Configuration instance.')
            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_configuration(self):
        """Reset the configuration back to 'system-default' (Azure has no true delete).

        NOTE(review): unlike create_update_configuration, the LRO poller is not
        awaited here, so the reset may still be in flight on return — confirm intended.
        """
        self.log("Deleting the Configuration instance {0}".format(self.name))
        try:
            response = self.mariadb_client.configurations.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                 server_name=self.server_name,
                                                                                 configuration_name=self.name,
                                                                                 parameters={'source': 'system-default'})
        except Exception as e:
            self.log('Error attempting to delete the Configuration instance.')
            self.fail("Error deleting the Configuration instance: {0}".format(str(e)))

        return True

    def get_configuration(self):
        """Fetch the current configuration; return its dict form, or False when absent."""
        self.log("Checking if the Configuration instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
                                                              server_name=self.server_name,
                                                              configuration_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Configuration instance : {0} found".format(response.name))
        except ResourceNotFoundError as e:
            self.log('Did not find the Configuration instance.')
        if found is True:
            return response.as_dict()

        return False


def main():
    """Main execution"""
    AzureRMMariaDbConfiguration()


if __name__ == '__main__':
    main()
2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbconfiguration_info +version_added: "0.1.2" +short_description: Get Azure MariaDB Configuration facts +description: + - Get facts of Azure MariaDB Configuration. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - Setting name. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get specific setting of MariaDB Server + azure_rm_mariadbconfiguration_info: + resource_group: myResourceGroup + server_name: testserver + name: deadlock_timeout + + - name: Get all settings of MariaDB Server + azure_rm_mariadbconfiguration_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +settings: + description: + - A list of dictionaries containing MariaDB Server settings. + returned: always + type: complex + contains: + id: + description: + - Setting resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver + /configurations/deadlock_timeout" + name: + description: + - Setting name. + returned: always + type: str + sample: deadlock_timeout + value: + description: + - Setting value. + returned: always + type: raw + sample: 1000 + description: + description: + - Description of the configuration. 
+ returned: always + type: str + sample: Deadlock timeout. + source: + description: + - Source of the configuration. + returned: always + type: str + sample: system-default +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +try: + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict(changed=False) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['settings'] = self.get() + else: + self.results['settings'] = self.list_by_server() + return self.results + + def get(self): + ''' + Gets facts of the specified MariaDB Configuration. 
+ + :return: deserialized MariaDB Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Configurations.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + ''' + Gets facts of the specified MariaDB Configuration. + + :return: deserialized MariaDB Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.mariadb_client.configurations.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for Configurations.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'id': d['id'], + 'name': d['name'], + 'value': d['value'], + 'description': d['description'], + 'source': d['source'] + } + return d + + +def main(): + AzureRMMariaDbConfigurationInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase.py new file mode 100644 index 000000000..d97e1a2dd --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbdatabase +version_added: "0.1.2" +short_description: Manage MariaDB Database instance +description: + - Create, update and delete instance of MariaDB Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database. + required: True + charset: + description: + - The charset of the database. Check MariaDB documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + collation: + description: + - The collation of the database. Check MariaDB documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + force_update: + description: + - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set. + - When set to C(false), no change will occur to the database even if any of the properties do not match. + type: bool + default: 'no' + state: + description: + - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB Database + azure_rm_mariadbdatabase: + resource_group: myResourceGroup + server_name: testserver + name: db1 +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1 +name: + description: + - Resource name. + returned: always + type: str + sample: db1 +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbDatabase(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB Database resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + charset=dict( + type='str' + ), + collation=dict( + type='str' + ), + force_update=dict( + type='bool', + default=False + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.force_update = None + self.parameters = dict() + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "charset": + self.parameters["charset"] = kwargs[key] + elif key == "collation": + self.parameters["collation"] = kwargs[key] + + old_response = None + response = None + 
+ resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_mariadbdatabase() + + if not old_response: + self.log("MariaDB Database instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB Database instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB Database instance has to be deleted or may be updated") + if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']): + self.to_do = Actions.Update + if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']): + self.to_do = Actions.Update + if self.to_do == Actions.Update: + if self.force_update: + if not self.check_mode: + self.delete_mariadbdatabase() + else: + self.fail("Database properties cannot be updated without setting 'force_update' option") + self.to_do = Actions.NoAction + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB Database instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mariadbdatabase() + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB Database instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mariadbdatabase() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mariadbdatabase(): + time.sleep(20) + else: + self.log("MariaDB Database instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + 
self.results["name"] = response["name"] + + return self.results + + def create_update_mariadbdatabase(self): + ''' + Creates or updates MariaDB Database with the specified configuration. + + :return: deserialized MariaDB Database instance state dictionary + ''' + self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name)) + + try: + response = self.mariadb_client.databases.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the MariaDB Database instance.') + self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mariadbdatabase(self): + ''' + Deletes specified MariaDB Database instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB Database instance {0}".format(self.name)) + try: + response = self.mariadb_client.databases.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + except Exception as e: + self.log('Error attempting to delete the MariaDB Database instance.') + self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e))) + + return True + + def get_mariadbdatabase(self): + ''' + Gets the properties of the specified MariaDB Database. 
+ + :return: deserialized MariaDB Database instance state dictionary + ''' + self.log("Checking if the MariaDB Database instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB Database instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the MariaDB Database instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbDatabase() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase_info.py new file mode 100644 index 000000000..02e005119 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbdatabase_info.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbdatabase_info +version_added: "0.1.2" +short_description: Get Azure MariaDB Database facts +description: + - Get facts of MariaDB Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the database. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: myResourceGroup + server_name: server_name + name: database_name + + - name: List instances of MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +databases: + description: + - A list of dictionaries containing facts for MariaDB Databases. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser + ver/databases/db1" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: testrg + server_name: + description: + - Server name. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: db1 + charset: + description: + - The charset of the database. + returned: always + type: str + sample: UTF8 + collation: + description: + - The collation of the database. 
+ returned: always + type: str + sample: English_United States.1252 +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.server_name is not None and + self.name is not None): + self.results['databases'] = self.get() + elif (self.resource_group is not None and + self.server_name is not None): + self.results['databases'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mariadb_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Databases.') + + if response is not None: + results.append(self.format_item(response)) + + return results 
+ + def list_by_server(self): + response = None + results = [] + try: + response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e))) + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'name': d['name'], + 'charset': d['charset'], + 'collation': d['collation'] + } + return d + + +def main(): + AzureRMMariaDbDatabaseInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule.py new file mode 100644 index 000000000..0424db9f7 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbfirewallrule +version_added: "0.1.2" +short_description: Manage MariaDB firewall rule instance +description: + - Create, update and delete instance of MariaDB firewall rule. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. 
+ required: True + name: + description: + - The name of the MariaDB firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the MariaDB firewall rule. Must be IPv4 format. + end_ip_address: + description: + - The end IP address of the MariaDB firewall rule. Must be IPv4 format. + state: + description: + - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB firewall rule + azure_rm_mariadbfirewallrule: + resource_group: myResourceGroup + server_name: testserver + name: rule1 + start_ip_address: 10.0.0.17 + end_ip_address: 10.0.0.20 +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire + wallRules/rule1" +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbFirewallRule(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB firewall rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + 
type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict(changed=False) + self.state = None + self.parameters = dict() + self.to_do = Actions.NoAction + + super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + if key in ['start_ip_address', 'end_ip_address']: + self.parameters[key] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_firewallrule() + + if not old_response: + self.log("MariaDB firewall rule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB firewall rule instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated") + if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): + self.to_do = Actions.Update + if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB firewall rule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_firewallrule() + + if not old_response: + self.results['changed'] = True + else: + 
self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB firewall rule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_firewallrule() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_firewallrule(): + time.sleep(20) + else: + self.log("MariaDB firewall rule instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_firewallrule(self): + ''' + Creates or updates MariaDB firewall rule with the specified configuration. + + :return: deserialized MariaDB firewall rule instance state dictionary + ''' + self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name)) + + try: + response = self.mariadb_client.firewall_rules.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the MariaDB firewall rule instance.') + self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_firewallrule(self): + ''' + Deletes specified MariaDB firewall rule instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name)) + try: + response = self.mariadb_client.firewall_rules.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + except Exception as e: + self.log('Error attempting to delete the MariaDB firewall rule instance.') + self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e))) + + return True + + def get_firewallrule(self): + ''' + Gets the properties of the specified MariaDB firewall rule. + + :return: deserialized MariaDB firewall rule instance state dictionary + ''' + self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB firewall rule instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the MariaDB firewall rule instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbFirewallRule() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule_info.py new file mode 100644 index 000000000..ad7f979c5 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbfirewallrule_info.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbfirewallrule_info +version_added: "0.0.1" +short_description: Get Azure MariaDB Firewall Rule facts +description: + - Get facts of Azure MariaDB Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the server firewall rule. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name + name: firewall_rule_name + + - name: List instances of MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +rules: + description: + - A list of dictionaries containing facts for MariaDB Firewall Rule. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire + wallRules/rule1" + server_name: + description: + - The name of the server. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: rule1 + start_ip_address: + description: + - The start IP address of the MariaDB firewall rule. + returned: always + type: str + sample: 10.0.0.16 + end_ip_address: + description: + - The end IP address of the MariaDB firewall rule. 
+ returned: always + type: str + sample: 10.0.0.18 +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.name is not None): + self.results['rules'] = self.get() + else: + self.results['rules'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: 
+ response = self.mariadb_client.firewall_rules.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d['id'], + 'server_name': self.server_name, + 'name': d['name'], + 'start_ip_address': d['start_ip_address'], + 'end_ip_address': d['end_ip_address'] + } + return d + + +def main(): + AzureRMMariaDbFirewallRuleInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver.py new file mode 100644 index 000000000..3e1b838cc --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver.py @@ -0,0 +1,383 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbserver +version_added: "0.1.2" +short_description: Manage MariaDB Server instance +description: + - Create, update and delete instance of MariaDB Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + name: + description: + - The name of the server. + required: True + sku: + description: + - The SKU (pricing tier) of the server. 
+ suboptions: + name: + description: + - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8). + tier: + description: + - The tier of the particular SKU, for example C(Basic). + choices: + - basic + - standard + capacity: + description: + - The scale up/out capacity, representing server's compute units. + type: int + size: + description: + - The size code, to be interpreted by resource as appropriate. + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + storage_mb: + description: + - The maximum storage allowed for a server. + type: int + version: + description: + - Server version. + choices: + - 10.2 + - 10.3 + enforce_ssl: + description: + - Enable SSL enforcement. + type: bool + default: False + admin_username: + description: + - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation). + admin_password: + description: + - The password of the administrator login. + create_mode: + description: + - Create mode of SQL Server. + default: Default + state: + description: + - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Create (or update) MariaDB Server + azure_rm_mariadbserver: + resource_group: myResourceGroup + name: testserver + sku: + name: B_Gen5_1 + tier: Basic + location: eastus + storage_mb: 1024 + enforce_ssl: True + version: 10.2 + admin_username: cloudsa + admin_password: password +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593 +version: + description: + - Server version. Possible values include C(10.2) and C(10.3). + returned: always + type: str + sample: 10.2 +state: + description: + - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled). + returned: always + type: str + sample: Ready +fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. + returned: always + type: str + sample: mariadbsrv1b6dd89593.mariadb.database.azure.com +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMariaDbServers(AzureRMModuleBase): + """Configuration class for an Azure RM MariaDB Server resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + sku=dict( + type='dict' + ), + location=dict( + type='str' + ), + storage_mb=dict( + type='int' + ), + version=dict( + type='str', + choices=['10.2', '10.3'] + ), + enforce_ssl=dict( + type='bool', + default=False + ), + create_mode=dict( + type='str', + default='Default' + ), + admin_username=dict( + type='str' + ), + admin_password=dict( + type='str', + no_log=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + self.tags = None + + self.results = dict(changed=False) + self.state = None + 
self.to_do = Actions.NoAction + + super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "sku": + ev = kwargs[key] + if 'tier' in ev: + if ev['tier'] == 'basic': + ev['tier'] = 'Basic' + elif ev['tier'] == 'standard': + ev['tier'] = 'Standard' + self.parameters["sku"] = ev + elif key == "location": + self.parameters["location"] = kwargs[key] + elif key == "storage_mb": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key] + elif key == "version": + self.parameters.setdefault("properties", {})["version"] = kwargs[key] + elif key == "enforce_ssl": + self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled' + elif key == "create_mode": + self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key] + elif key == "admin_username": + self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key] + elif key == "admin_password": + self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_mariadbserver() + + if not old_response: + self.log("MariaDB Server instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MariaDB Server instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if 
MariaDB Server instance has to be deleted or may be updated") + update_tags, newtags = self.update_tags(old_response.get('tags', {})) + if update_tags: + self.tags = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MariaDB Server instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mariadbserver() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MariaDB Server instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mariadbserver() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mariadbserver(): + time.sleep(20) + else: + self.log("MariaDB Server instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["version"] = response["version"] + self.results["state"] = response["user_visible_state"] + self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"] + + return self.results + + def create_update_mariadbserver(self): + ''' + Creates or updates MariaDB Server with the specified configuration. 
+ + :return: deserialized MariaDB Server instance state dictionary + ''' + self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name)) + + try: + self.parameters['tags'] = self.tags + if self.to_do == Actions.Create: + response = self.mariadb_client.servers.begin_create(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + else: + # structure of parameters for update must be changed + self.parameters.update(self.parameters.pop("properties", {})) + response = self.mariadb_client.servers.begin_update(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the MariaDB Server instance.') + self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mariadbserver(self): + ''' + Deletes specified MariaDB Server instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MariaDB Server instance {0}".format(self.name)) + try: + response = self.mariadb_client.servers.begin_delete(resource_group_name=self.resource_group, + server_name=self.name) + except Exception as e: + self.log('Error attempting to delete the MariaDB Server instance.') + self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e))) + + return True + + def get_mariadbserver(self): + ''' + Gets the properties of the specified MariaDB Server. 
+ + :return: deserialized MariaDB Server instance state dictionary + ''' + self.log("Checking if the MariaDB Server instance {0} is present".format(self.name)) + found = False + try: + response = self.mariadb_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MariaDB Server instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the MariaDB Server instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMariaDbServers() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver_info.py new file mode 100644 index 000000000..e5aa0c88f --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mariadbserver_info.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# Copyright (c) 2019 Matti Ranta, (@techknowlogick) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mariadbserver_info +version_added: "0.1.2" +short_description: Get Azure MariaDB Server facts +description: + - Get facts of MariaDB Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + - Matti Ranta (@techknowlogick) + +''' + +EXAMPLES = ''' + - name: Get instance of MariaDB Server + azure_rm_mariadbserver_info: + resource_group: myResourceGroup + name: server_name + + - name: List instances of MariaDB Server + azure_rm_mariadbserver_info: + resource_group: myResourceGroup + tags: + - key:value +''' + +RETURN = ''' +servers: + description: + - A list of dictionaries containing facts for MariaDB servers. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223 + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Resource name. + returned: always + type: str + sample: myabdud1223 + location: + description: + - The location the resource resides in. + returned: always + type: str + sample: eastus + sku: + description: + - The SKU of the server. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: GP_Gen4_2 + tier: + description: + - The tier of the particular SKU. + returned: always + type: str + sample: GeneralPurpose + capacity: + description: + - The scale capacity. + returned: always + type: int + sample: 2 + storage_mb: + description: + - The maximum storage allowed for a server. + returned: always + type: int + sample: 128000 + enforce_ssl: + description: + - Enable SSL enforcement. + returned: always + type: bool + sample: False + admin_username: + description: + - The administrator's login name of a server. + returned: always + type: str + sample: serveradmin + version: + description: + - Server version. 
class AzureRMMariaDbServerInfo(AzureRMModuleBase):
    """Facts module: fetch a single MariaDB server or list servers in a resource group."""

    def __init__(self):
        # module argument definitions
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str'),
            tags=dict(type='list', elements='str')
        )
        # result skeleton returned to Ansible
        self.results = dict(changed=False)
        self.resource_group = None
        self.name = None
        self.tags = None
        super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec,
                                                       supports_check_mode=True,
                                                       supports_tags=False,
                                                       facts_module=True)

    def exec_module(self, **kwargs):
        """Dispatch to a single-server lookup or a resource-group listing."""
        is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'",
                                  version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.resource_group is not None and self.name is not None:
            self.results['servers'] = self.get()
        elif self.resource_group is not None:
            self.results['servers'] = self.list_by_resource_group()
        return self.results

    def get(self):
        """Return a one-element list with the named server's facts (empty on miss or tag mismatch)."""
        response = None
        results = []
        try:
            response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
                                                       server_name=self.name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError:
            self.log('Could not get facts for MariaDB Server.')

        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_item(response))

        return results
self.mariadb_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for MariaDB Server.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for MariaDB Servers.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'id': d['id'], + 'resource_group': self.resource_group, + 'name': d['name'], + 'sku': d['sku'], + 'location': d['location'], + 'storage_mb': d['storage_profile']['storage_mb'], + 'version': d['version'], + 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'), + 'admin_username': d['administrator_login'], + 'user_visible_state': d['user_visible_state'], + 'fully_qualified_domain_name': d['fully_qualified_domain_name'], + 'tags': d.get('tags') + } + + return d + + +def main(): + AzureRMMariaDbServerInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting.py new file mode 100644 index 000000000..0f08eff9a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting.py @@ -0,0 +1,640 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_monitordiagnosticsetting +version_added: "1.10.0" +short_description: Create, update, or manage Azure Monitor diagnostic settings. + +description: + - Create, update, or manage Azure Monitor diagnostic settings for any type of resource. + +options: + name: + description: + - The name of the diagnostic settings. + type: str + required: true + resource: + description: + - The resource which will be monitored with the diagnostic setting. + - It can be a string containing the resource ID. + - It can be a dictionary containing I(name), I(type), I(resource_group), and optionally I(subscription_id). + - I(name). The resource name. + - I(type). The resource type including namespace, such as 'Microsoft.Network/virtualNetworks'. + - I(resource_group). The resource group containing the resource. + - I(subscription_id). The subscription ID containing the resource. If none is specified, the credential's subscription ID will be used. + type: raw + required: true + storage_account: + description: + - A storage account which will receive the diagnostic logs. + - It can be a string containing the storage account resource ID. + - It can be a dictionary containing I(name) and optionally I(subscription_id) and I(resource_group). + - At least one of I(storage_account), I(log_analytics), or I(event_hub) must be specified for the diagnostic setting. + type: raw + log_analytics: + description: + - A log analytics workspace which will receive the diagnostic logs. + - It can be a string containing the log analytics workspace resource ID. + - It can be a dictionary containing I(name) and optionally I(subscription_id) and I(resource_group). + - At least one of I(storage_account), I(log_analytics), or I(event_hub) must be specified for the diagnostic setting. + type: raw + event_hub: + description: + - An event hub which will receive the diagnostic logs. 
+ - At least one of I(storage_account), I(log_analytics), or I(event_hub) must be specified for the diagnostic setting. + type: dict + suboptions: + namespace: + description: + - The event hub namespace. + type: str + required: true + policy: + description: + - The shared access policy. + type: str + required: true + hub: + description: + - An event hub name to receive logs. If none is specified, the default event hub will be selected. + type: str + resource_group: + description: + - The resource group containing the event hub. If none is specified, the resource group of the I(resource) parameter will be used. + type: str + subscription_id: + description: + - The subscription ID containing the event hub. If none is specified, the subscription ID of the I(resource) parameter will be used. + type: str + logs: + description: + - The list of log setttings. + - At least one of I(metrics) or I(logs) must be specified for the diagnostic setting. + type: list + elements: dict + suboptions: + category: + description: + - Name of a Management Group Diagnostic Log category for a resource type this setting is applied to. + type: str + category_group: + description: + - Name of a Management Group Diagnostic Log category group for a resource type this setting is applied to. + type: str + enabled: + description: + - Whether the log is enabled. + type: bool + default: true + retention_policy: + description: + - The retention policy for this log. + type: dict + suboptions: + days: + description: + - The number of days for the retention policy. + type: int + default: 0 + enabled: + description: + - Whether the retention policy is enabled. + type: bool + default: true + metrics: + description: + - The list of metric setttings. + - At least one of I(metrics) or I(logs) must be specified for the diagnostic setting. + type: list + elements: dict + suboptions: + category: + description: + - Name of a Diagnostic Metric category for a resource type this setting is applied to. 
+ type: str + enabled: + description: + - Whether the metric category is enabled. + type: bool + default: true + retention_policy: + description: + - The retention policy for this metric. + type: dict + suboptions: + days: + description: + - The number of days for the retention policy. + type: int + default: 0 + enabled: + description: + - Whether the retention policy is enabled. + type: bool + default: true + state: + description: + - State of the private endpoint DNS zone group. Use C(present) to create or update and C(absent) to delete. + default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' +- name: Create storage-based diagnostic setting for a virtual network + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + storage_account: "{{ storage_output.state.id }}" + logs: + - category_group: "allLogs" + metrics: + - category: "AllMetrics" + +- name: Create diagnostic setting for webapp with log analytics, event hub, and storage + azure_rm_monitordiagnosticsetting: + name: "webapp-logs" + resource: + name: "my-webapp" + type: "Microsoft.Web/sites" + resource_group: "my-webapp-resource-group" + event_hub: + namespace: "my-event-hub" + policy: "RootManageSharedAccessKey" + log_analytics: + name: "my-log-analytics-workspace" + resource_group: "my-log-analytics-workspace-resource-group" + storage_account: + name: "mystorageaccount" + logs: + - category: "AppServiceHTTPLogs" + - category: "AppServiceConsoleLogs" + - category: "AppServiceAppLogs" + - category: "AppServiceAuditLogs" + - category: "AppServiceIPSecAuditLogs" + - category: "AppServicePlatformLogs" + +- name: Delete diagnostic setting + azure_rm_monitordiagnosticsetting: + name: "webapp-logs" + resource: + name: "my-webapp" + type: "Microsoft.Web/sites" + resource_group: "my-webapp-resource-group" + state: "absent" +''' + +RETURN 
= ''' +state: + description: + - The state of the diagnostic setting. + returned: always + type: dict + contains: + id: + description: + - ID of the diagnostic setting. + sample: >- + /subscriptions/xxx/resourcegroups/my-resource-group/providers/microsoft.network/applicationgateways/my-appgw/ + providers/microsoft.insights/diagnosticSettings/my-diagnostic-setting + returned: always + type: str + name: + description: + - Name of the diagnostic setting. + returned: always + type: str + sample: my-diagnostic-setting + logs: + description: + - Enabled log configurations for the diagnostic setting. + returned: always + type: list + elements: dict + contains: + category: + description: + - Name of a Management Group Diagnostic Log category for a resource type this setting is applied to. + type: str + returned: always + category_group: + description: + - Name of a Management Group Diagnostic Log category group for a resource type this setting is applied to. + type: str + returned: always + enabled: + description: + - Whether this log is enabled. + type: bool + returned: always + retention_policy: + description: + - The retention policy for this log. + type: dict + returned: always + contains: + enabled: + description: + - Whether the retention policy is enabled. + type: bool + returned: always + days: + description: + - The number of days for the retention policy. + type: int + returned: always + metrics: + description: + - Enabled metric configurations for the diagnostic setting. + returned: always + type: list + elements: dict + contains: + category: + description: + - Name of a Diagnostic Metric category for a resource type this setting is applied to. + type: str + returned: always + enabled: + description: + - Whether the metric category is enabled. + type: bool + returned: always + retention_policy: + description: + - The retention policy for the metric category. 
+ type: dict + returned: always + contains: + enabled: + description: + - Whether the retention policy is enabled. + type: bool + returned: always + days: + description: + - The number of days for the retention policy. + type: int + returned: always + event_hub: + description: + - The event hub for the diagnostic setting, if configured. + returned: always + type: dict + contains: + id: + description: + - ID of the event hub namespace. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourceGroups/my-resource-group/providers/Microsoft.EventHub/namespaces/my-event-hub-namespace + namespace: + description: + - Name of the event hub namespace. + returned: always + type: str + sample: my-event-hub-namespace + hub: + description: + - Name of the hub within the namespace. + returned: always + type: str + sample: my-event-hub + policy: + description: + - Name of the event hub shared access policy. + returned: always + type: str + sample: RootManageSharedAccessKey + log_analytics: + description: + - The log analytics workspace for the diagnostic setting, if configured. + returned: always + type: dict + contains: + id: + description: + - ID of the log analytics workspace. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourcegroups/my-resource-group/providers/microsoft.operationalinsights/workspaces/my-log-analytics-workspace + storage_account: + description: + - The storage account for the diagnostic setting, if configured. + returned: always + type: dict + contains: + id: + description: + - ID of the storage account. 
# Suboption schema for the event_hub module parameter.
event_hub_spec = dict(
    namespace=dict(type="str", required=True),
    policy=dict(type="str", required=True),
    hub=dict(type="str"),
    resource_group=dict(type="str"),
    subscription_id=dict(type="str"),
)

# Shared retention-policy schema used by both log and metric entries.
retention_policy_spec = dict(
    days=dict(type="int", default=0),
    enabled=dict(type="bool", default=True),
)

# Schema for one entry of the logs parameter.
logs_spec = dict(
    category=dict(type="str"),
    category_group=dict(type="str"),
    enabled=dict(type="bool", default=True),
    retention_policy=dict(type="dict", options=retention_policy_spec),
)

# Schema for one entry of the metrics parameter.
metrics_spec = dict(
    category=dict(type="str"),
    enabled=dict(type="bool", default=True),
    retention_policy=dict(type="dict", options=retention_policy_spec),
)


class Actions:
    """Enumeration of the possible reconciliation actions."""
    NoAction, Create, Update, Delete = range(4)
self).__init__(self.module_arg_spec, + required_if=[ + ("state", "present", ("storage_account", "log_analytics", "event_hub"), True), + ("state", "present", ("logs", "metrics"), True), + ], + supports_tags=False, + supports_check_mode=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.parameters[key] = kwargs[key] + + self.process_parameters() + + old_response = self.get_item() + + if old_response is None or not old_response: + if self.state == "present": + self.to_do = Actions.Create + else: + if self.state == "absent": + self.to_do = Actions.Delete + else: + self.results["compare"] = [] + if not self.idempotency_check(old_response, self.diagnostic_setting_to_dict(self.parameters)): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results["changed"] = True + if self.check_mode: + return self.results + response = self.create_update_setting() + elif self.to_do == Actions.Delete: + self.results["changed"] = True + if self.check_mode: + return self.results + response = self.delete_setting() + else: + self.results["changed"] = False + response = old_response + + if response is not None: + self.results["state"] = response + + return self.results + + def process_parameters(self): + if isinstance(self.resource, dict): + if "/" not in self.resource.get("type"): + self.fail("resource type parameter must include namespace, such as 'Microsoft.Network/virtualNetworks'") + self.resource = resource_id(subscription=self.resource.get("subscription_id", self.subscription_id), + resource_group=self.resource.get("resource_group"), + namespace=self.resource.get("type").split("/")[0], + type=self.resource.get("type").split("/")[1], + name=self.resource.get("name")) + + parsed_resource = parse_resource_id(self.resource) + + storage_account = self.parameters.pop("storage_account", None) + if 
storage_account: + if isinstance(storage_account, dict): + if not storage_account.get("name"): + self.fail("storage_account must contain 'name'") + + storage_account_id = resource_id(subscription=storage_account.get("subscription_id", parsed_resource.get("subscription")), + resource_group=storage_account.get("resource_group", parsed_resource.get("resource_group")), + namespace="Microsoft.Storage", + type="storageAccounts", + name=storage_account.get("name")) + else: + storage_account_id = storage_account + + self.parameters["storage_account_id"] = storage_account_id + + log_analytics = self.parameters.pop("log_analytics", None) + if log_analytics: + if isinstance(log_analytics, dict): + if not log_analytics.get("name"): + self.fail("log_analytics must contain 'name'") + + log_analytics_id = resource_id(subscription=log_analytics.get("subscription_id", parsed_resource.get("subscription")), + resource_group=log_analytics.get("resource_group", parsed_resource.get("resource_group")), + namespace="microsoft.operationalinsights", + type="workspaces", + name=log_analytics.get("name")) + else: + log_analytics_id = log_analytics + + self.parameters["workspace_id"] = log_analytics_id + + event_hub = self.parameters.pop("event_hub", None) + if event_hub: + hub_subscription_id = event_hub.get("subscription_id") if event_hub.get("subscription_id") else parsed_resource.get("subscription") + hub_resource_group = event_hub.get("resource_group") if event_hub.get("resource_group") else parsed_resource.get("resource_group") + auth_rule_id = resource_id(subscription=hub_subscription_id, + resource_group=hub_resource_group, + namespace="Microsoft.EventHub", + type="namespaces", + name=event_hub.get("namespace"), + child_type_1="authorizationrules", + child_name_1=event_hub.get("policy")) + self.parameters["event_hub_authorization_rule_id"] = auth_rule_id + self.parameters["event_hub_name"] = event_hub.get("hub") + + def get_item(self): + self.log("Get diagnostic setting for {0} in 
{1}".format(self.name, self.resource)) + + try: + item = self.monitor_diagnostic_settings_client.diagnostic_settings.get(resource_uri=self.resource, name=self.name) + return self.diagnostic_setting_to_dict(item) + except Exception: + self.log("Did not find diagnostic setting for {0} in {1}".format(self.name, self.resource)) + + return None + + def create_update_setting(self): + try: + response = self.monitor_diagnostic_settings_client.diagnostic_settings.create_or_update(resource_uri=self.resource, + name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + return self.diagnostic_setting_to_dict(response) + except Exception as exc: + self.fail("Error creating or updating diagnostic setting {0} for resource {1}: {2}".format(self.name, self.resource, str(exc))) + + def delete_setting(self): + try: + response = self.monitor_diagnostic_settings_client.diagnostic_settings.delete(resource_uri=self.resource, + name=self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + return response + except Exception as exc: + self.fail("Error deleting diagnostic setting {0} for resource {1}: {2}".format(self.name, self.resource, str(exc))) + + def diagnostic_setting_to_dict(self, diagnostic_setting): + setting_dict = diagnostic_setting if isinstance(diagnostic_setting, dict) else diagnostic_setting.as_dict() + result = dict( + id=setting_dict.get("id"), + name=setting_dict.get("name"), + event_hub=self.event_hub_dict(setting_dict), + storage_account=self.storage_dict(setting_dict.get("storage_account_id")), + log_analytics=self.log_analytics_dict(setting_dict.get("workspace_id")), + logs=[self.log_config_to_dict(log) for log in setting_dict.get("logs", [])], + metrics=[self.metric_config_to_dict(metric) for metric in setting_dict.get("metrics", [])], + ) + return self.remove_disabled_config(result) + + def remove_disabled_config(self, diagnostic_setting): + 
diagnostic_setting["logs"] = [log for log in diagnostic_setting.get("logs", []) if log.get("enabled")] + diagnostic_setting["metrics"] = [metric for metric in diagnostic_setting.get("metrics", []) if metric.get("enabled")] + return diagnostic_setting + + def event_hub_dict(self, setting_dict): + auth_rule_id = setting_dict.get("event_hub_authorization_rule_id") + if auth_rule_id: + parsed_rule_id = parse_resource_id(auth_rule_id) + return dict( + id=resource_id(subscription=parsed_rule_id.get("subscription"), + resource_group=parsed_rule_id.get("resource_group"), + namespace=parsed_rule_id.get("namespace"), + type=parsed_rule_id.get("type"), + name=parsed_rule_id.get("name")), + namespace=parsed_rule_id.get("name"), + hub=setting_dict.get("event_hub_name"), + policy=parsed_rule_id.get("resource_name"), + ) + return None + + def storage_dict(self, storage_account_id): + if storage_account_id: + return dict( + id=storage_account_id, + ) + return None + + def log_analytics_dict(self, workspace_id): + if workspace_id: + return dict( + id=workspace_id, + ) + return None + + def log_config_to_dict(self, log_config): + return dict( + category=log_config.get("category"), + category_group=log_config.get("category_group"), + enabled=log_config.get("enabled"), + retention_policy=self.retention_policy_to_dict(log_config.get("retention_policy")), + ) + + def metric_config_to_dict(self, metric_config): + return dict( + category=metric_config.get("category"), + enabled=metric_config.get("enabled"), + retention_policy=self.retention_policy_to_dict(metric_config.get("retention_policy")), + ) + + def retention_policy_to_dict(self, policy): + if policy: + return dict( + days=policy.get("days"), + enabled=policy.get("enabled"), + ) + return None + + +def main(): + AzureRMMonitorDiagnosticSetting() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting_info.py new file mode 100644 index 000000000..f7081ce79 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitordiagnosticsetting_info.py @@ -0,0 +1,366 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_monitordiagnosticsetting_info +version_added: "1.10.0" +short_description: Get Azure Monitor diagnostic setting facts. + +description: + - Get facts for Azure Monitor diagnostic settings for any type of resource. + +options: + name: + description: + - Limit results to a single diagnostic setting within a resource. + type: str + resource: + description: + - The resource which will be monitored with the diagnostic setting. + - It can be a string containing the resource ID. + - It can be a dictionary containing I(name), I(type), I(resource_group), and optionally I(subscription_id). + - I(name). The resource name. + - I(type). The resource type including namespace, such as 'Microsoft.Network/virtualNetworks'. + - I(resource_group). The resource group containing the resource. + - I(subscription_id). The subscription ID containing the resource. If none is specified, the credential's subscription ID will be used. 
+ type: raw + required: true + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' +- name: Get all diagnostic settings for a resource + azure_rm_monitordiagnosticsetting_info: + resource: "/subscriptions/my-resource-group/resourceGroups/my-resource-group/providers/Microsoft.Web/sites/my-web-app" + +- name: Get all diagnostic settings for a resource using a dictionary + azure_rm_monitordiagnosticsetting_info: + resource: + name: "my-web-app" + type: "Microsoft.Web/sites" + resource_group: "my-resource-group" + +- name: Get a specific diagnostic setting + azure_rm_monitordiagnosticsetting_info: + name: "my-diagnostic-setting" + resource: "/subscriptions/my-resource-group/resourceGroups/my-resource-group/providers/Microsoft.Network/virtualNetworks/my-vnet" +''' + +RETURN = ''' +settings: + description: + - List of diagnostic settings, sorted by name. + returned: always + type: list + elements: dict + contains: + id: + description: + - ID of the diagnostic setting. + sample: >- + /subscriptions/xxx/resourcegroups/my-resource-group/providers/microsoft.network/applicationgateways/my-appgw/ + providers/microsoft.insights/diagnosticSettings/my-diagnostic-setting + returned: always + type: str + name: + description: + - Name of the diagnostic setting. + returned: always + type: str + sample: my-diagnostic-setting + logs: + description: + - Enabled log configurations for the diagnostic setting. + returned: always + type: list + elements: dict + contains: + category: + description: + - Name of a Management Group Diagnostic Log category for a resource type this setting is applied to. + type: str + returned: always + category_group: + description: + - Name of a Management Group Diagnostic Log category group for a resource type this setting is applied to. + type: str + returned: always + enabled: + description: + - Whether this log is enabled. 
+ type: bool + returned: always + retention_policy: + description: + - The retention policy for this log. + type: dict + returned: always + contains: + enabled: + description: + - Whether the retention policy is enabled. + type: bool + returned: always + days: + description: + - The number of days for the retention policy. + type: int + returned: always + metrics: + description: + - Enabled metric configurations for the diagnostic setting. + returned: always + type: list + elements: dict + contains: + category: + description: + - Name of a Diagnostic Metric category for a resource type this setting is applied to. + type: str + returned: always + enabled: + description: + - Whether the metric category is enabled. + type: bool + returned: always + retention_policy: + description: + - The retention policy for the metric category. + type: dict + returned: always + contains: + enabled: + description: + - Whether the retention policy is enabled. + type: bool + returned: always + days: + description: + - The number of days for the retention policy. + type: int + returned: always + event_hub: + description: + - The event hub for the diagnostic setting, if configured. + returned: always + type: dict + contains: + id: + description: + - ID of the event hub namespace. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourceGroups/my-resource-group/providers/Microsoft.EventHub/namespaces/my-event-hub-namespace + namespace: + description: + - Name of the event hub namespace. + returned: always + type: str + sample: my-event-hub-namespace + hub: + description: + - Name of the hub within the namespace. + returned: always + type: str + sample: my-event-hub + policy: + description: + - Name of the event hub shared access policy. + returned: always + type: str + sample: RootManageSharedAccessKey + log_analytics: + description: + - The log analytics workspace for the diagnostic setting, if configured. 
+ returned: always + type: dict + contains: + id: + description: + - ID of the log analytics workspace. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourcegroups/my-resource-group/providers/microsoft.operationalinsights/workspaces/my-log-analytics-workspace + storage_account: + description: + - The storage account for the diagnostic setting, if configured. + returned: always + type: dict + contains: + id: + description: + - ID of the storage account. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourceGroups/my-resource-group/providers/Microsoft.Storage/storageAccounts/my-storage-account +''' + +try: + from msrestazure.tools import (parse_resource_id, resource_id) +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt + + +class AzureRMMonitorDiagnosticSettingInfo(AzureRMModuleBaseExt): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type="str"), + resource=dict(type="raw", required=True), + ) + + self.results = dict( + changed=False, + settings=[], + ) + + self.name = None + self.resource = None + + super(AzureRMMonitorDiagnosticSettingInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + self.process_parameters() + + if self.name is not None: + self.results["settings"] = self.get_item() + else: + self.results["settings"] = self.list_items() + + return self.results + + def process_parameters(self): + if isinstance(self.resource, dict): + if "/" not in self.resource.get("type"): + self.fail("resource type parameter must include namespace, such as 'Microsoft.Network/virtualNetworks'") + self.resource = resource_id(subscription=self.resource.get("subscription_id", self.subscription_id), + 
resource_group=self.resource.get("resource_group"), + namespace=self.resource.get("type").split("/")[0], + type=self.resource.get("type").split("/")[1], + name=self.resource.get("name")) + + def get_item(self): + self.log("Get diagnostic setting for {0} in {1}".format(self.name, self.resource)) + + try: + item = self.monitor_diagnostic_settings_client.diagnostic_settings.get(resource_uri=self.resource, name=self.name) + return [self.diagnostic_setting_to_dict(item)] + except Exception: + self.log("Could not get diagnostic setting for {0} in {1}".format(self.name, self.resource)) + + return [] + + def list_items(self): + self.log("List all diagnostic settings in {0}".format(self.resource)) + try: + items = self.monitor_diagnostic_settings_client.diagnostic_settings.list(resource_uri=self.resource) + items = [self.diagnostic_setting_to_dict(item) for item in items] + items = sorted(items, key=lambda d: d["name"]) + return items + except Exception as exc: + self.fail("Failed to list all diagnostic settings in {0}: {1}".format(self.resource, str(exc))) + + def diagnostic_setting_to_dict(self, diagnostic_setting): + setting_dict = diagnostic_setting if isinstance(diagnostic_setting, dict) else diagnostic_setting.as_dict() + result = dict( + id=setting_dict.get("id"), + name=setting_dict.get("name"), + event_hub=self.event_hub_dict(setting_dict), + storage_account=self.storage_dict(setting_dict.get("storage_account_id")), + log_analytics=self.log_analytics_dict(setting_dict.get("workspace_id")), + logs=[self.log_config_to_dict(log) for log in setting_dict.get("logs", [])], + metrics=[self.metric_config_to_dict(metric) for metric in setting_dict.get("metrics", [])], + ) + return self.remove_disabled_config(result) + + def remove_disabled_config(self, diagnostic_setting): + diagnostic_setting["logs"] = [log for log in diagnostic_setting.get("logs", []) if log.get("enabled")] + diagnostic_setting["metrics"] = [metric for metric in diagnostic_setting.get("metrics", []) if 
metric.get("enabled")] + return diagnostic_setting + + def event_hub_dict(self, setting_dict): + auth_rule_id = setting_dict.get("event_hub_authorization_rule_id") + if auth_rule_id: + parsed_rule_id = parse_resource_id(auth_rule_id) + return dict( + id=resource_id(subscription=parsed_rule_id.get("subscription"), + resource_group=parsed_rule_id.get("resource_group"), + namespace=parsed_rule_id.get("namespace"), + type=parsed_rule_id.get("type"), + name=parsed_rule_id.get("name")), + namespace=parsed_rule_id.get("name"), + hub=setting_dict.get("event_hub_name"), + policy=parsed_rule_id.get("resource_name"), + ) + return None + + def storage_dict(self, storage_account_id): + if storage_account_id: + return dict( + id=storage_account_id, + ) + return None + + def log_analytics_dict(self, workspace_id): + if workspace_id: + return dict( + id=workspace_id, + ) + return None + + def log_config_to_dict(self, log_config): + return dict( + category=log_config.get("category"), + category_group=log_config.get("category_group"), + enabled=log_config.get("enabled"), + retention_policy=self.retention_policy_to_dict(log_config.get("retention_policy")), + ) + + def metric_config_to_dict(self, metric_config): + return dict( + category=metric_config.get("category"), + enabled=metric_config.get("enabled"), + retention_policy=self.retention_policy_to_dict(metric_config.get("retention_policy")), + ) + + def retention_policy_to_dict(self, policy): + if policy: + return dict( + days=policy.get("days"), + enabled=policy.get("enabled"), + ) + return None + + +def main(): + AzureRMMonitorDiagnosticSettingInfo() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitorlogprofile.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitorlogprofile.py new file mode 100644 index 000000000..a224c1fba --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_monitorlogprofile.py @@ -0,0 
+1,388 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_monitorlogprofile +version_added: "0.0.1" +short_description: Manage Azure Monitor log profile +description: + - Create, update and delete instance of Azure Monitor log profile. + +options: + name: + description: + - Unique name of the log profile to create or update. + required: True + type: str + location: + description: + - Resource location. + type: str + locations: + description: + - List of regions for which Activity Log events should be stored. + type: list + elements: str + categories: + description: + - List of categories of logs. These categories are created as is convenient to user. Some Values are C(Write), C(Delete) and/or C(Action). + type: list + elements: str + retention_policy: + description: + - Retention policy for events in the log. + type: dict + suboptions: + enabled: + description: + - Whether the retention policy is enabled. + type: bool + days: + description: + - The number of days for the retention. A value of 0 will retain the events indefinitely. + type: int + service_bus_rule_id: + description: + - The service bus rule ID of the service bus namespace in which you would like to have Event Hubs created for streaming in the Activity Log. + - Format like {service_bus_resource_id}/authorizationrules{key_name}. + type: str + storage_account: + description: + - The storage account to which send the Activity Log. + - It could be a resource ID. + - It could be a dict containing I(resource_grorup) and I(name). + type: raw + state: + description: + - Assert the state of the log profile. + - Use C(present) to create or update a log profile and C(absent) to delete it. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yunge Zhu(@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a log profile + azure_rm_monitorlogprofile: + name: myProfile + location: eastus + locations: + - eastus + - westus + categories: + - Write + - Action + retention_policy: + enabled: False + days: 1 + storage_account: + resource_group: myResourceGroup + name: myStorageAccount + register: output + + - name: Delete a log profile + azure_rm_monitorlogprofile: + name: myProfile + state: absent +''' + +RETURN = ''' +id: + description: + - ID of the log profile. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/microsoft.insights/logprofiles/myProfile + +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id + from azure.core.exceptions import HttpResponseError + from azure.core.polling import LROPoller + from msrestazure.tools import is_valid_resource_id + from azure.mgmt.monitor.models import (RetentionPolicy, LogProfileResource) +except ImportError: + # This is handled in azure_rm_common + pass + + +retention_policy_spec = dict( + enabled=dict(type='bool'), + days=dict(type='int') +) + + +def logprofile_to_dict(profile): + return dict( + id=profile.id, + name=profile.name, + location=profile.location, + locations=profile.locations, + categories=profile.categories, + storage_account=profile.storage_account_id, + service_bus_rule_id=profile.service_bus_rule_id, + retention_policy=dict( + enabled=profile.retention_policy.enabled, + days=profile.retention_policy.days + ), + tags=profile.tags if profile.tags else None + ) + + +class Actions: + NoAction, CreateOrUpdate, Delete = range(3) + + +class AzureRMMonitorLogprofile(AzureRMModuleBase): + """Configuration class for an Azure RM 
Monitor log profile""" + + def __init__(self): + self.module_arg_spec = dict( + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + locations=dict( + type='list', + elements='str' + ), + categories=dict( + type='list', + elements='str' + ), + retention_policy=dict( + type='dict', + options=retention_policy_spec + ), + service_bus_rule_id=dict( + type='str' + ), + storage_account=dict( + type='raw' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self._client = None + + self.name = None + self.location = None + + self.locations = None + self.categories = None + self.retention_policy = False + self.service_bus_rule_id = None + self.storage_account = None + + self.tags = None + + self.results = dict( + changed=False, + id=None + ) + self.state = None + + super(AzureRMMonitorLogprofile, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + old_response = None + response = None + to_be_updated = False + + # get storage account id + if self.storage_account: + if isinstance(self.storage_account, dict): + self.storage_account = format_resource_id(val=self.storage_account['name'], + subscription_id=self.storage_account.get('subscription') or self.subscription_id, + namespace='Microsoft.Storage', + types='storageAccounts', + resource_group=self.storage_account.get('resource_group')) + elif not is_valid_resource_id(self.storage_account): + self.fail("storage_account either be a resource id or a dict containing resource_group and name") + + # get existing log profile + old_response = self.get_logprofile() + + if old_response: + self.results['id'] = old_response['id'] + + if self.state == 'present': + # if profile not exists, create new + if not old_response: + self.log("Log 
profile instance doesn't exist") + + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + + else: + # log profile exists already, do update + self.log("Log profile instance already exists") + + update_tags, self.tags = self.update_tags(old_response.get('tags', None)) + + if update_tags: + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + + # check if update + if self.check_update(old_response): + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + + elif self.state == 'absent': + if old_response: + self.log("Delete log profile instance") + self.results['id'] = old_response['id'] + to_be_updated = True + self.to_do = Actions.Delete + else: + self.results['changed'] = False + self.log("Log profile {0} not exists.".format(self.name)) + + if to_be_updated: + self.log('Need to Create/Update log profile') + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.to_do == Actions.CreateOrUpdate: + response = self.create_or_update_logprofile() + self.results['id'] = response['id'] + + if self.to_do == Actions.Delete: + self.delete_logprofile() + self.log('Log profile instance deleted') + + return self.results + + def check_update(self, existing): + if self.locations is not None and existing['locations'] != self.locations: + self.log("locations diff: origin {0} / update {1}".format(existing['locations'], self.locations)) + return True + if self.retention_policy is not None: + if existing['retention_policy']['enabled'] != self.retention_policy['enabled']: + self.log("retention_policy diff: origin {0} / update {1}".format(str(existing['retention_policy']['enabled']), str(self.retention_policy['enabled']))) + return True + if existing['retention_policy']['days'] != self.retention_policy['days']: + self.log("retention_policy diff: origin {0} / update {1}".format(existing['retention_policy']['days'], str(self.retention_policy['days']))) + return True + if self.storage_account is not None and existing['storage_account'] !=
self.storage_account: + self.log("storage_account diff: origin {0} / update {1}".format(existing['storage_account'], self.storage_account)) + return True + if self.service_bus_rule_id is not None and existing['service_bus_rule_id'] != self.service_bus_rule_id: + self.log("service_bus_rule_id diff: origin {0} / update {1}".format(existing['service_bus_rule_id'], self.service_bus_rule_id)) + return True + return False + + def create_or_update_logprofile(self): + ''' + Creates or Update log profile. + + :return: deserialized log profile state dictionary + ''' + self.log( + "Creating log profile instance {0}".format(self.name)) + + try: + params = LogProfileResource( + location=self.location, + locations=self.locations, + categories=self.categories, + retention_policy=RetentionPolicy(days=self.retention_policy['days'], + enabled=self.retention_policy['enabled']) if self.retention_policy else None, + storage_account_id=self.storage_account if self.storage_account else None, + service_bus_rule_id=self.service_bus_rule_id if self.service_bus_rule_id else None, + tags=self.tags + ) + + response = self.monitor_log_profiles_client.log_profiles.create_or_update(log_profile_name=self.name, + parameters=params) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except HttpResponseError as exc: + self.log('Error attempting to create/update log profile.') + self.fail("Error creating/updating log profile: {0}".format(str(exc))) + return logprofile_to_dict(response) + + def delete_logprofile(self): + ''' + Deletes specified log profile. 
+ + :return: True + ''' + self.log("Deleting the log profile instance {0}".format(self.name)) + try: + response = self.monitor_log_profiles_client.log_profiles.delete(log_profile_name=self.name) + except HttpResponseError as e: + self.log('Error attempting to delete the log profile.') + self.fail( + "Error deleting the log profile: {0}".format(str(e))) + return True + + def get_logprofile(self): + ''' + Gets the properties of the specified log profile. + + :return: log profile state dictionary + ''' + self.log("Checking if the log profile {0} is present".format(self.name)) + + response = None + + try: + response = self.monitor_log_profiles_client.log_profiles.get(log_profile_name=self.name) + + self.log("Response : {0}".format(response)) + self.log("log profile : {0} found".format(response.name)) + return logprofile_to_dict(response) + + except HttpResponseError: + self.log("Didn't find log profile {0}".format(self.name)) + + return False + + +def main(): + """Main execution""" + AzureRMMonitorLogprofile() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_multiplemanageddisks.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_multiplemanageddisks.py new file mode 100644 index 000000000..76a57a659 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_multiplemanageddisks.py @@ -0,0 +1,737 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 Aubin Bikouo (@abikouo) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_multiplemanageddisks + +version_added: "1.14.0" + +short_description: Manage Multiple Azure Manage Disks + +description: + - Create, update and delete one or more Azure Managed Disk. 
- This module can also be used to attach/detach disks to/from one or more virtual machines.
+ type: str + choices: + - empty + - import + - copy + storage_account_id: + description: + - The full path to the storage account the image is to be imported from. + - Required when I(create_option=import). + type: str + source_uri: + description: + - URI to a valid VHD file to be used or the resource ID of the managed disk to copy. + - Required when I(create_option=import) or I(create_option=copy). + aliases: + - source_resource_uri + type: str + os_type: + description: + - Type of Operating System. + - Used when I(create_option=copy) or I(create_option=import) and the source is an OS disk. + - If omitted during creation, no value is set. + - If omitted during an update, no change is made. + - Once set, this value cannot be cleared. + choices: + - linux + - windows + type: str + disk_size_gb: + description: + - Size in GB of the managed disk to be created. + - Required when I(create_option=empty). + - If I(create_option=copy) then the value must be greater than or equal to the source's size. + type: int + max_shares: + description: + - The maximum number of VMs that can attach to the disk at the same time. + - Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + type: int + attach_caching: + description: + - Disk caching policy controlled by VM. Will be used when attached to the VM defined by C(managed_by). + - If this option is different from the current caching policy, the managed disk will be deattached + and attached with current caching option again. + choices: + - '' + - read_only + - read_write + type: str + zone: + description: + - The Azure managed disk's zone. + - Allowed values are C(1), C(2), C(3) and C(''). + choices: + - '1' + - '2' + - '3' + - '' + type: str + lun: + description: + - The logical unit number for data disk. + - This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. 
+ type: int + managed_by_extended: + description: + - List of name and resource group of the VMs to managed disks. + - When I(state=present), the disks will be attached to the list of VMs specified. + - When I(state=present), use I([]) to detach disks from all the VMs. + - When I(state=absent) and this parameter is defined, the disks will be detached from the list of VMs. + - When I(state=absent) and this parameter is not defined, the disks will be deleted. + type: list + elements: dict + suboptions: + resource_group: + description: + - The resource group of the attache VM. + type: str + name: + description: + - The name of the attache VM. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Aubin Bikouo (@abikouo) +''' + +EXAMPLES = ''' + - name: Create managed operating system disks from page blob and attach them to a list of VMs + azure_rm_multiplemanageddisks: + managed_disks: + - name: mymanageddisk1 + location: eastus2 + resource_group: myResourceGroup + create_option: import + source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd + storage_account_id: /subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/storageaccountname + os_type: windows + storage_account_type: Premium_LRS + - name: mymanageddisk2 + location: eastus2 + resource_group: myResourceGroup + create_option: import + source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd + storage_account_id: /subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/storageaccountname + os_type: windows + storage_account_type: Premium_LRS + managed_by_extended: + - resource_group: myResourceGroupTest + name: TestVM + + - name: Detach disks from the VMs specified in the list + azure_rm_multiplemanageddisks: + state: absent + managed_disks: + - name: mymanageddisk1 + location: eastus2 + resource_group: 
myResourceGroup + create_option: import + source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd + storage_account_id: /subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/storageaccountname + os_type: windows + storage_account_type: Premium_LRS + - name: mymanageddisk2 + location: eastus2 + resource_group: myResourceGroup + create_option: import + source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd + storage_account_id: /subscriptions//resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/storageaccountname + os_type: windows + storage_account_type: Premium_LRS + managed_by_extended: + - resource_group: myResourceGroupTest + name: TestVM1 + - resource_group: myResourceGroupTest + name: TestVM2 + + - name: Detach managed disks from all VMs without deletion + azure_rm_multiplemanageddisks: + state: present + managed_disks: + - name: mymanageddisk1 + location: eastus2 + resource_group: myResourceGroup + - name: mymanageddisk2 + location: eastus2 + resource_group: myResourceGroup + managed_by_extended: [] + + - name: Detach managed disks from all VMs and delete them + azure_rm_multiplemanageddisks: + state: absent + managed_disks: + - name: mymanageddisk1 + location: eastus2 + resource_group: myResourceGroup + - name: mymanageddisk2 + location: eastus2 + resource_group: myResourceGroup +''' + +RETURN = ''' +state: + description: + - Current state of the managed disks. + returned: always + type: complex + contains: + id: + description: + - Resource id. + type: str + name: + description: + - Name of the managed disk. + type: str + location: + description: + - Valid Azure location. + type: str + storage_account_type: + description: + - Type of storage for the managed disk. + - See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about this type. 
+ type: str + sample: Standard_LRS + create_option: + description: + - Create option of the disk. + type: str + sample: copy + storage_account_id: + description: + - The full path to the storage account the image is to be imported from + type: str + sample: /subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/ + source_uri: + description: + - URI to a valid VHD file to be used or the resource ID of the managed disk to copy. + type: str + os_type: + description: + - Type of Operating System. + type: str + sample: linux + disk_size_gb: + description: + - Size in GB of the managed disk to be created. + type: str + managed_by: + description: + - Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group. + type: str + max_shares: + description: + - The maximum number of VMs that can attach to the disk at the same time. + - Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + type: int + sample: 3 + managed_by_extended: + description: + - List ID of an existing virtual machine with which the disk is or will be associated. + type: list + sample: ["/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Compute/virtualMachines/testVM"] + tags: + description: + - Tags to assign to the managed disk. 
# duplicated in azure_rm_manageddisk_facts
def managed_disk_to_dict(managed_disk):
    """Flatten an Azure Disk SDK object into the plain dict shape used in this module's results."""
    creation = managed_disk.creation_data
    # prefer the blob URI; 'copy' disks carry a source resource id instead
    source = creation.source_uri or creation.source_resource_id
    os_type = managed_disk.os_type
    sku = managed_disk.sku
    zones = managed_disk.zones
    return {
        'id': managed_disk.id,
        'name': managed_disk.name,
        'location': managed_disk.location,
        'tags': managed_disk.tags,
        'create_option': creation.create_option.lower(),
        'source_uri': source,
        'disk_size_gb': managed_disk.disk_size_gb,
        'os_type': os_type.lower() if os_type else None,
        'storage_account_type': sku.name if sku else None,
        'managed_by': managed_disk.managed_by,
        'max_shares': managed_disk.max_shares,
        'managed_by_extended': managed_disk.managed_by_extended,
        'zone': zones[0] if zones else '',
    }
def validate_disks_parameter(self):
    """Fail fast when a disk entry lacks the options implied by its create_option."""
    # each create_option value implies a set of options that must be present
    required_by_option = {
        'import': ['source_uri', 'storage_account_id'],
        'copy': ['source_uri'],
        'empty': ['disk_size_gb'],
    }
    problems = []
    for disk in self.managed_disks:
        option = disk.get("create_option")
        needed = required_by_option.get(option)
        if needed is None:
            continue
        if any(disk.get(key) is None for key in needed):
            problems.append(
                "managed disk {0}/{1} has create_option set to {2} but not all required parameters ({3}) are set.".format(
                    disk.get("resource_group"), disk.get("name"), option, ",".join(needed)))
    if problems:
        self.fail(msg="Some required options are missing from managed disks configuration.", errors=problems)
def get_disk_instance(self, managed_disk):
    """Resolve the target location and merge existing disk state into the requested params.

    :param managed_disk: one entry of the managed_disks module option (mutated in place)
    :return: (API parameter dict, existing disk dict or None when the disk does not exist)
    """
    resource_group = self.get_resource_group(managed_disk.get("resource_group"))
    # default the disk location to the resource group's location
    managed_disk["location"] = managed_disk.get("location") or resource_group.location
    disk_instance = self.get_managed_disk(resource_group=managed_disk.get("resource_group"), name=managed_disk.get("name"))
    if disk_instance is not None:
        # keep the current value for any option the user left unspecified
        for key in ("create_option", 'source_uri', 'disk_size_gb', 'os_type', 'zone'):
            if managed_disk.get(key) is None:
                managed_disk[key] = disk_instance.get(key)
    parameter = self.generate_disk_parameters(tags=self.tags, **managed_disk)

    return parameter, disk_instance

def exec_module(self, **kwargs):
    """Main module execution method"""
    self.tags = kwargs.get("tags")

    state = kwargs.get("state")
    self.managed_disks = kwargs.get("managed_disks")
    self.managed_by_extended = kwargs.get("managed_by_extended")

    self.validate_disks_parameter()

    # resolve every managed_by_extended entry to a full VirtualMachine object up front
    managed_vm_id = []
    if self.managed_by_extended:
        managed_vm_id = [self._get_vm(vm['resource_group'], vm['name']) for vm in self.managed_by_extended]

    if state == "present":
        return self.create_or_attach_disks(managed_vm_id)
    elif state == "absent":
        return self.detach_or_delete_disks(managed_vm_id)
def create_or_attach_disks(self, managed_vm_id):
    """Ensure every requested disk exists (creating/updating as needed) and,
    when managed_by_extended was supplied, reconcile disk/VM attachments.

    :param managed_vm_id: VirtualMachine SDK objects matching self.managed_by_extended
    :return: dict with the 'changed' flag and the final state of all disks
    """
    changed, disk_instances, disks_to_create = False, [], []
    for disk in self.managed_disks:
        parameter, disk_instance = self.get_disk_instance(disk)
        # create or update disk
        disk_info_to_compare = dict(zone=disk.get("zone"), max_shares=disk.get("max_shares"), found_disk=disk_instance, new_disk=parameter)
        if disk_instance is None or self.is_different(**disk_info_to_compare):
            disks_to_create.append((disk, parameter))
        else:
            disk_instances.append((disk, disk_instance))

    if len(disks_to_create) > 0:
        changed = True
        result = self.create_or_update_disks(disks_to_create)
        disk_instances += result

    if self.managed_by_extended is not None and len(self.managed_by_extended) > 0:
        # Attach the disk to multiple VM
        attach_config = []
        for vm in managed_vm_id:
            # NOTE(review): fixed 5s pause per VM before inspecting attachments —
            # presumably to let freshly created disks propagate; confirm whether
            # polling the disk state would be more reliable than sleeping
            time.sleep(5)
            # only attach disks that are not already attached to this VM
            disks = [(d, i) for d, i in disk_instances if not self._is_disk_attached_to_vm(vm.id, i)]
            if len(disks) > 0:
                attach_config.append(self.create_attachment_configuration(vm, disks))

        if len(attach_config) > 0:
            changed = True
            self.update_virtual_machines(attach_config)

    elif self.managed_by_extended == []:
        # Detach disks from all VMs attaching them
        changed = self.detach_disks_from_all_vms(disk_instances) or changed

    return dict(changed=changed, state=self.compute_disks_result(disk_instances))
def detach_disks_from_all_vms(self, disk_instances):
    """Detach every disk in disk_instances from every VM currently using it.

    The VMs are discovered from the disks' own managed_by / managed_by_extended
    fields, so no managed_by_extended module option is needed here.

    :param disk_instances: list of (module params, disk dict) tuples
    :return: True when at least one VM had to be updated
    """
    changed = False
    # Detach disk to all VMs attaching it
    unique_vm_id = []
    for param, disk_instance in disk_instances:
        managed_by_vm = disk_instance.get("managed_by")
        managed_by_extended_vms = disk_instance.get("managed_by_extended") or []
        if managed_by_vm is not None and managed_by_vm not in unique_vm_id:
            unique_vm_id.append(managed_by_vm)
        for vm_id in managed_by_extended_vms:
            if vm_id not in unique_vm_id:
                unique_vm_id.append(vm_id)
    if unique_vm_id:
        # lower-cased because create_detachment_configuration compares names case-insensitively
        disks_names = [instance.get("name").lower() for d, instance in disk_instances]
        changed = True
        attach_config = []
        for vm_id in unique_vm_id:
            vm_name_id = parse_resource_id(vm_id)
            vm_instance = self._get_vm(vm_name_id['resource_group'], vm_name_id['resource_name'])
            attach_config.append(self.create_detachment_configuration(vm_instance, disks_names))

        if len(attach_config) > 0:
            # changed is already True at this point; kept for symmetry with the attach path
            changed = True
            self.update_virtual_machines(attach_config)
    return changed
def create_attachment_configuration(self, vm, disks):
    """Append the given managed disks to a VM's data-disk profile (mutated in place).

    :param vm: VirtualMachine SDK object
    :param disks: list of (module params, disk dict) tuples to attach
    :return: (resource_group, vm_name, vm) tuple consumed by update_virtual_machines
    """
    vm_id = parse_resource_id(vm.id)

    # attach all disks to the virtual machine
    for managed_disk, disk_instance in disks:
        lun = managed_disk.get("lun")
        if lun is None:
            # no LUN requested: pick the lowest LUN not already in use on this VM
            luns = ([d.lun for d in vm.storage_profile.data_disks] if vm.storage_profile.data_disks else [])
            lun = 0
            while True:
                if lun not in luns:
                    break
                lun = lun + 1
        # if a disk with this name is already attached, reuse its LUN (idempotent re-attach)
        for item in vm.storage_profile.data_disks:
            if item.name == managed_disk.get("name"):
                lun = item.lun

        # prepare the data disk
        params = self.compute_models.ManagedDiskParameters(id=disk_instance.get('id'), storage_account_type=disk_instance.get('storage_account_type'))
        attach_caching = managed_disk.get("attach_caching")
        caching_options = self.compute_models.CachingTypes[attach_caching] if attach_caching and attach_caching != '' else None

        # pylint: disable=missing-kwoa
        data_disk = self.compute_models.DataDisk(lun=lun,
                                                 create_option=self.compute_models.DiskCreateOptionTypes.attach,
                                                 managed_disk=params,
                                                 caching=caching_options)
        vm.storage_profile.data_disks.append(data_disk)
    return vm_id["resource_group"], vm_id["resource_name"], vm

def create_detachment_configuration(self, vm_instance, disks_names):
    """Remove the named disks from a VM's data-disk profile (mutated in place).

    Fails when none of the names are attached, as that indicates a user error.

    :param vm_instance: VirtualMachine SDK object
    :param disks_names: lower-cased disk names to detach
    :return: (resource_group, vm_name, vm) tuple consumed by update_virtual_machines
    """
    vm_data = parse_resource_id(vm_instance.id)
    # keep only data disks whose name is not in the detach list (case-insensitive)
    leftovers = [d for d in vm_instance.storage_profile.data_disks if d.name.lower() not in disks_names]
    if len(vm_instance.storage_profile.data_disks) == len(leftovers):
        self.fail("None of the following disks '{0}' are attached to the VM '{1}/{2}'.".format(
            disks_names, vm_data["resource_group"], vm_data["resource_name"]
        ))
    vm_instance.storage_profile.data_disks = leftovers
    return vm_data["resource_group"], vm_data["resource_name"], vm_instance
def create_or_update_disks(self, disks_to_create):
    """Start creation/update of every disk and wait for all long-running operations.

    :param disks_to_create: list of (module params, API parameter dict) tuples
    :return: list of (module params, disk dict) tuples for the created disks
    """
    pending = []
    for disk_info, api_params in disks_to_create:
        group = disk_info.get("resource_group")
        disk_name = disk_info.get("name")
        try:
            pending.append(self.compute_client.disks.begin_create_or_update(group, disk_name, api_params))
        except Exception as e:
            self.fail("Error creating the managed disk {0}/{1}: {2}".format(group, disk_name, str(e)))
    # wait for every operation, then pair each result back with its input params
    created = self.get_multiple_pollers_results(pending)
    return [(disks_to_create[idx][0], managed_disk_to_dict(instance)) for idx, instance in enumerate(created)]
# This method accounts for the difference in structure between the
# Azure retrieved disk and the parameters for the new disk to be created.
def is_different(self, zone, max_shares, found_disk, new_disk):
    """Compare an existing disk (dict from managed_disk_to_dict) with requested parameters.

    :param zone: requested availability zone, or None to ignore
    :param max_shares: requested max_shares, or None to ignore
    :param found_disk: existing disk as returned by managed_disk_to_dict()
    :param new_disk: request payload built by generate_disk_parameters()
    :return: True when the existing disk differs and must be updated
    """
    resp = False
    if new_disk.get('disk_size_gb'):
        if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']:
            resp = True
    if new_disk.get('os_type'):
        # found_disk stores a lower-case string, new_disk stores an SDK enum value
        if found_disk['os_type'] is None or not self.compute_models.OperatingSystemTypes(found_disk['os_type'].capitalize()) == new_disk['os_type']:
            resp = True
    if new_disk.get('sku'):
        if not found_disk['storage_account_type'] == new_disk['sku'].name:
            resp = True
    # NOTE(review): tags use exact-equality comparison; a pure append of tags is
    # reported as a difference like any other change - confirm this is intended
    if new_disk.get('tags') is not None:
        if not found_disk['tags'] == new_disk['tags']:
            resp = True
    if zone is not None:
        if not found_disk['zone'] == zone:
            resp = True
    if max_shares is not None:
        if not found_disk['max_shares'] == max_shares:
            resp = True
    return resp

def delete_disks(self, ids):
    """Start deletion of every disk id and wait for all long-running operations.

    :param ids: list of managed disk resource ids
    :return: results of the delete operations
    """
    pollers = []
    for disk_id in ids:
        try:
            disk = parse_resource_id(disk_id)
            resource_group, name = disk.get("resource_group"), disk.get("resource_name")
            poller = self.compute_client.disks.begin_delete(resource_group, name)
            pollers.append(poller)
        except Exception as e:
            # Report the raw id: if parse_resource_id itself raised, the
            # resource_group/name locals would be unbound (NameError on the first
            # iteration) or stale from a previous iteration, masking the real error.
            self.fail("Error deleting the managed disk {0}: {1}".format(disk_id, str(e)))
    return self.get_multiple_pollers_results(pollers)

def update_virtual_machines(self, config):
    """Apply attach/detach changes to all VMs and wait for all long-running operations.

    :param config: list of (resource_group, vm_name, VirtualMachine) tuples
    :return: results of the update operations
    """
    pollers = []
    for resource_group, name, params in config:
        try:
            poller = self.compute_client.virtual_machines.begin_create_or_update(resource_group, name, params)
            pollers.append(poller)
        except AzureError as exc:
            self.fail("Error updating virtual machine (attaching/detaching disks) {0}/{1} - {2}".format(resource_group, name, exc.message))
    return self.get_multiple_pollers_results(pollers)

def get_managed_disk(self, resource_group, name):
    """Fetch a managed disk as a plain dict, or None when it does not exist."""
    try:
        resp = self.compute_client.disks.get(resource_group, name)
        return managed_disk_to_dict(resp)
    except ResourceNotFoundError:
        self.log("Did not find managed disk {0}/{1}".format(resource_group, name))
main(): + """Main execution""" + AzureRMMultipleManagedDisk() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlconfiguration.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlconfiguration.py new file mode 100644 index 000000000..6f4f477ab --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlconfiguration.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mysqlconfiguration +version_added: "0.1.2" +short_description: Manage Configuration instance +description: + - Create, update and delete instance of Configuration. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the server configuration. + required: True + value: + description: + - Value of the configuration. + state: + description: + - Assert the state of the MySQL configuration. Use C(present) to update setting, or C(absent) to reset to default value. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Update SQL Server setting + azure_rm_mysqlconfiguration: + resource_group: myResourceGroup + server_name: myServer + name: event_scheduler + value: "ON" +''' + +RETURN = ''' +id: + description: + - Resource ID. 
class Actions:
    """Enumeration of the operation exec_module decided to perform."""
    NoAction, Create, Update, Delete = range(4)


class AzureRMMySqlConfiguration(AzureRMModuleBase):
    """Manage a single MySQL server configuration setting.

    C(present) upserts the value as a user-override; C(absent) resets the
    setting back to its system default.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            value=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # populated from module args in exec_module
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.value = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMySqlConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                        supports_check_mode=True,
                                                        supports_tags=False)

    def exec_module(self, **kwargs):
        """Decide which action is required, perform it, and return the module results."""
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        response = None
        old_response = self.get_configuration()

        if not old_response:
            self.log("Configuration instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Configuration instance already exists")
            # only a user-override needs resetting; a system default already satisfies 'absent'
            if self.state == 'absent' and old_response['source'] == 'user-override':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Configuration instance has to be deleted or may be updated")
                if self.value != old_response.get('value'):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Configuration instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_configuration()

            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Configuration instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_configuration()
        else:
            self.log("Configuration instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_configuration(self):
        """Set the configuration value as a user-override.

        :return: the new configuration state as a dict
        """
        self.log("Creating / Updating the Configuration instance {0}".format(self.name))

        try:
            response = self.mysql_client.configurations.begin_create_or_update(resource_group_name=self.resource_group,
                                                                               server_name=self.server_name,
                                                                               configuration_name=self.name,
                                                                               parameters={'value': self.value, 'source': 'user-override'})
            # begin_* returns a poller for the long-running operation; block until done
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.log('Error attempting to create the Configuration instance.')
            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_configuration(self):
        """Reset the configuration back to its system default.

        :return: True
        """
        self.log("Deleting the Configuration instance {0}".format(self.name))
        try:
            response = self.mysql_client.configurations.begin_create_or_update(resource_group_name=self.resource_group,
                                                                               server_name=self.server_name,
                                                                               configuration_name=self.name,
                                                                               parameters={'source': 'system-default'})
            # wait for the reset to finish, mirroring create_update_configuration;
            # previously the module could return before the operation completed
            if isinstance(response, LROPoller):
                self.get_poller_result(response)
        except Exception as e:
            self.log('Error attempting to delete the Configuration instance.')
            self.fail("Error deleting the Configuration instance: {0}".format(str(e)))

        return True

    def get_configuration(self):
        """Return the current setting as a dict, or False when it does not exist."""
        self.log("Checking if the Configuration instance {0} is present".format(self.name))
        try:
            response = self.mysql_client.configurations.get(resource_group_name=self.resource_group,
                                                            server_name=self.server_name,
                                                            configuration_name=self.name)
        except ResourceNotFoundError:
            self.log('Did not find the Configuration instance.')
            return False
        self.log("Response : {0}".format(response))
        self.log("Configuration instance : {0} found".format(response.name))
        return response.as_dict()
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get specific setting of MySQL Server + azure_rm_mysqlconfiguration_info: + resource_group: myResourceGroup + server_name: testmysqlserver + name: deadlock_timeout + + - name: Get all settings of MySQL Server + azure_rm_mysqlconfiguration_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +settings: + description: + - A list of dictionaries containing MySQL Server settings. + returned: always + type: complex + contains: + id: + description: + - Setting resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testmysqlser + ver/configurations/deadlock_timeout" + name: + description: + - Setting name. + returned: always + type: str + sample: deadlock_timeout + value: + description: + - Setting value. + returned: always + type: raw + sample: 1000 + description: + description: + - Description of the configuration. + returned: always + type: str + sample: Deadlock timeout. + source: + description: + - Source of the configuration. 
class AzureRMMySqlConfigurationInfo(AzureRMModuleBase):
    """Gather facts for one or all configuration settings of a MySQL server."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(changed=False)
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMySqlConfigurationInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to a single-setting lookup or a full listing based on I(name)."""
        is_old_facts = self.module._name == 'azure_rm_mysqlconfiguration_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mysqlconfiguration_facts' module has been renamed to 'azure_rm_mysqlconfiguration_info'", version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name is not None:
            self.results['settings'] = self.get()
        else:
            self.results['settings'] = self.list_by_server()
        return self.results

    def get(self):
        '''
        Gets facts of the specified MySQL Configuration.

        :return: list with zero or one formatted setting dict
        '''
        results = []
        try:
            response = self.mysql_client.configurations.get(resource_group_name=self.resource_group,
                                                            server_name=self.server_name,
                                                            configuration_name=self.name)
            self.log("Response : {0}".format(response))
            results.append(self.format_item(response))
        except ResourceNotFoundError:
            # a missing setting is not an error for an info module; return an empty list
            self.log('Could not get facts for Configurations.')
        return results

    def list_by_server(self):
        '''
        Gets facts for all configurations of the specified MySQL server.

        :return: list of formatted setting dicts
        '''
        results = []
        try:
            response = self.mysql_client.configurations.list_by_server(resource_group_name=self.resource_group,
                                                                       server_name=self.server_name)
            self.log("Response : {0}".format(response))
            for item in response:
                results.append(self.format_item(item))
        except Exception:
            self.log('Could not get facts for Configurations.')
        return results

    def format_item(self, item):
        """Project an SDK Configuration object onto the documented RETURN shape."""
        d = item.as_dict()
        return {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            'id': d['id'],
            'name': d['name'],
            'value': d['value'],
            'description': d['description'],
            'source': d['source']
        }
print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mysqldatabase +version_added: "0.1.2" +short_description: Manage MySQL Database instance +description: + - Create, update and delete instance of MySQL Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database. + required: True + charset: + description: + - The charset of the database. Check MySQL documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + collation: + description: + - The collation of the database. Check MySQL documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + force_update: + description: + - When set to C(true), will delete and recreate the existing MySQL database if any of the properties don't match what is set. + - When set to C(false), no change will occur to the database even if any of the properties do not match. + type: bool + default: 'no' + state: + description: + - Assert the state of the MySQL Database. Use C(present) to create or update a database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) MySQL Database + azure_rm_mysqldatabase: + resource_group: myResourceGroup + server_name: testserver + name: db1 +''' + +RETURN = ''' +id: + description: + - Resource ID. 
def __init__(self):
    """Declare the module arguments and initialize state before exec_module runs."""
    self.module_arg_spec = dict(
        resource_group=dict(
            type='str',
            required=True
        ),
        server_name=dict(
            type='str',
            required=True
        ),
        name=dict(
            type='str',
            required=True
        ),
        charset=dict(
            type='str'
        ),
        collation=dict(
            type='str'
        ),
        force_update=dict(
            type='bool',
            default=False
        ),
        state=dict(
            type='str',
            default='present',
            choices=['present', 'absent']
        )
    )

    # populated from module args in exec_module
    self.resource_group = None
    self.server_name = None
    self.name = None
    self.force_update = None
    # request body for begin_create_or_update; only charset/collation are ever set
    self.parameters = dict()

    self.results = dict(changed=False)
    self.state = None
    self.to_do = Actions.NoAction

    super(AzureRMMySqlDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
                                               supports_check_mode=True,
                                               supports_tags=False)
resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_mysqldatabase() + + if not old_response: + self.log("MySQL Database instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MySQL Database instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MySQL Database instance has to be deleted or may be updated") + if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']): + self.to_do = Actions.Update + if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']): + self.to_do = Actions.Update + if self.to_do == Actions.Update: + if self.force_update: + if not self.check_mode: + self.delete_mysqldatabase() + else: + self.fail("Database properties cannot be updated without setting 'force_update' option") + self.to_do = Actions.NoAction + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MySQL Database instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mysqldatabase() + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MySQL Database instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mysqldatabase() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mysqldatabase(): + time.sleep(20) + else: + self.log("MySQL Database instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["name"] = 
response["name"] + + return self.results + + def create_update_mysqldatabase(self): + ''' + Creates or updates MySQL Database with the specified configuration. + + :return: deserialized MySQL Database instance state dictionary + ''' + self.log("Creating / Updating the MySQL Database instance {0}".format(self.name)) + + try: + response = self.mysql_client.databases.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the MySQL Database instance.') + self.fail("Error creating the MySQL Database instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mysqldatabase(self): + ''' + Deletes specified MySQL Database instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MySQL Database instance {0}".format(self.name)) + try: + response = self.mysql_client.databases.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + except Exception as e: + self.log('Error attempting to delete the MySQL Database instance.') + self.fail("Error deleting the MySQL Database instance: {0}".format(str(e))) + + return True + + def get_mysqldatabase(self): + ''' + Gets the properties of the specified MySQL Database. 
+ + :return: deserialized MySQL Database instance state dictionary + ''' + self.log("Checking if the MySQL Database instance {0} is present".format(self.name)) + found = False + try: + response = self.mysql_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MySQL Database instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the MySQL Database instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMySqlDatabase() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqldatabase_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqldatabase_info.py new file mode 100644 index 000000000..9c53a686a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqldatabase_info.py @@ -0,0 +1,203 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mysqldatabase_info +version_added: "0.1.2" +short_description: Get Azure MySQL Database facts +description: + - Get facts of MySQL Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the database. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of MySQL Database + azure_rm_mysqldatabase_info: + resource_group: myResourceGroup + server_name: server_name + name: database_name + + - name: List instances of MySQL Database + azure_rm_mysqldatabase_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +databases: + description: + - A list of dictionaries containing facts for MySQL Databases. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testser + ver/databases/db1" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: testrg + server_name: + description: + - Server name. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: db1 + charset: + description: + - The charset of the database. + returned: always + type: str + sample: utf8 + collation: + description: + - The collation of the database. 
+ returned: always + type: str + sample: utf8_general_ci +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMySqlDatabaseInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMySqlDatabaseInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mysqldatabase_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mysqldatabase_facts' module has been renamed to 'azure_rm_mysqldatabase_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.server_name is not None and + self.name is not None): + self.results['databases'] = self.get() + elif (self.resource_group is not None and + self.server_name is not None): + self.results['databases'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mysql_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Databases.') + + if response is not None: + 
results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.mysql_client.databases.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e))) + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'name': d['name'], + 'charset': d['charset'], + 'collation': d['collation'] + } + return d + + +def main(): + AzureRMMySqlDatabaseInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule.py new file mode 100644 index 000000000..ef5765718 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mysqlfirewallrule +version_added: "0.1.2" +short_description: Manage MySQL firewall rule instance +description: + - Create, update and delete instance of MySQL firewall rule. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. 
+ required: True + name: + description: + - The name of the MySQL firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the MySQL firewall rule. Must be IPv4 format. + required: True + end_ip_address: + description: + - The end IP address of the MySQL firewall rule. Must be IPv4 format. + required: True + state: + description: + - Assert the state of the MySQL firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) MySQL firewall rule + azure_rm_mysqlfirewallrule: + resource_group: myResourceGroup + server_name: testserver + name: rule1 + start_ip_address: 10.0.0.17 + end_ip_address: 10.0.0.20 +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testserver/fire + wallRules/rule1" +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMySqlFirewallRule(AzureRMModuleBase): + """Configuration class for an Azure RM MySQL firewall rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + type='str' + ), + 
state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict(changed=False) + self.state = None + self.parameters = dict() + self.to_do = Actions.NoAction + + super(AzureRMMySqlFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + if key in ['start_ip_address', 'end_ip_address']: + self.parameters[key] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_firewallrule() + + if not old_response: + self.log("MySQL firewall rule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("MySQL firewall rule instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if MySQL firewall rule instance has to be deleted or may be updated") + if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): + self.to_do = Actions.Update + if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MySQL firewall rule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_firewallrule() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = 
old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MySQL firewall rule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_firewallrule() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_firewallrule(): + time.sleep(20) + else: + self.log("MySQL firewall rule instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_firewallrule(self): + ''' + Creates or updates MySQL firewall rule with the specified configuration. + + :return: deserialized MySQL firewall rule instance state dictionary + ''' + self.log("Creating / Updating the MySQL firewall rule instance {0}".format(self.name)) + + try: + response = self.mysql_client.firewall_rules.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the MySQL firewall rule instance.') + self.fail("Error creating the MySQL firewall rule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_firewallrule(self): + ''' + Deletes specified MySQL firewall rule instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the MySQL firewall rule instance {0}".format(self.name)) + try: + response = self.mysql_client.firewall_rules.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + except Exception as e: + self.log('Error attempting to delete the MySQL firewall rule instance.') + self.fail("Error deleting the MySQL firewall rule instance: {0}".format(str(e))) + + return True + + def get_firewallrule(self): + ''' + Gets the properties of the specified MySQL firewall rule. + + :return: deserialized MySQL firewall rule instance state dictionary + ''' + self.log("Checking if the MySQL firewall rule instance {0} is present".format(self.name)) + found = False + try: + response = self.mysql_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MySQL firewall rule instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the MySQL firewall rule instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMySqlFirewallRule() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule_info.py new file mode 100644 index 000000000..5bc8c1aff --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlfirewallrule_info.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: 
azure_rm_mysqlfirewallrule_info +version_added: "0.1.2" +short_description: Get Azure MySQL Firewall Rule facts +description: + - Get facts of Azure MySQL Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the server firewall rule. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of MySQL Firewall Rule + azure_rm_mysqlfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name + name: firewall_rule_name + + - name: List instances of MySQL Firewall Rule + azure_rm_mysqlfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +rules: + description: + - A list of dictionaries containing facts for MySQL Firewall Rule. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/fire + wallRules/rule1" + server_name: + description: + - The name of the server. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: rule1 + start_ip_address: + description: + - The start IP address of the MySQL firewall rule. + returned: always + type: str + sample: 10.0.0.16 + end_ip_address: + description: + - The end IP address of the MySQL firewall rule. 
+ returned: always + type: str + sample: 10.0.0.18 +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMMySqlFirewallRuleInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMMySqlFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_mysqlfirewallrule_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_mysqlfirewallrule_facts' module has been renamed to 'azure_rm_mysqlfirewallrule_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.name is not None): + self.results['rules'] = self.get() + else: + self.results['rules'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.mysql_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = 
self.mysql_client.firewall_rules.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d['id'], + 'server_name': self.server_name, + 'name': d['name'], + 'start_ip_address': d['start_ip_address'], + 'end_ip_address': d['end_ip_address'] + } + return d + + +def main(): + AzureRMMySqlFirewallRuleInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver.py new file mode 100644 index 000000000..49bfee6a6 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver.py @@ -0,0 +1,470 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mysqlserver +version_added: "0.1.2" +short_description: Manage MySQL Server instance +description: + - Create, update and delete instance of MySQL Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + required: True + type: str + sku: + description: + - The SKU (pricing tier) of the server. 
+ type: dict + suboptions: + name: + description: + - The name of the sku, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8). + type: str + tier: + description: + - The tier of the particular SKU, for example C(Basic). + type: str + choices: + - basic + - standard + capacity: + description: + - The scale up/out capacity, representing server's compute units. + type: int + size: + description: + - The size code, to be interpreted by resource as appropriate. + type: str + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + type: str + storage_profile: + description: + - Storage Profile properties of a server. + type: dict + suboptions: + storage_mb: + description: + - The maximum storage allowed for a server. + type: int + backup_retention_days: + description: + - Backup retention days for the server + type: int + geo_redundant_backup: + description: + - Enable Geo-redundant or not for server backup. + type: str + choices: + - Disabled + - Enabled + storage_autogrow: + description: + - Enable Storage Auto Grow. + type: str + choices: + - Disabled + - Enabled + version: + description: + - Server version. + type: str + choices: + - '5.7' + - '8.0' + enforce_ssl: + description: + - Enable SSL enforcement. + type: bool + default: False + admin_username: + description: + - The administrator's login name of a server. + - Can only be specified when the server is being created (and is required for creation). + type: str + admin_password: + description: + - The password of the administrator login. + type: str + create_mode: + description: + - Create mode of SQL Server. + default: Default + type: str + restarted: + description: + - Set to C(true) with I(state=present) to restart a running mysql server. + default: False + type: bool + state: + description: + - Assert the state of the MySQL Server. Use C(present) to create or update a server and C(absent) to delete it. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) MySQL Server + azure_rm_mysqlserver: + resource_group: myResourceGroup + name: testserver + sku: + name: B_Gen5_1 + tier: Basic + location: eastus + storage_profile: + storage_mb: 51200 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + enforce_ssl: True + version: 5.7 + admin_username: cloudsa + admin_password: password +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/mysqlsrv1b6dd89593 +version: + description: + - Server version. Possible values include C(5.6), C(5.7), C(8.0). + returned: always + type: str + sample: 5.7 +state: + description: + - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled). + returned: always + type: str + sample: Ready +fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. 
+ returned: always + type: str + sample: mysqlsrv1b6dd89593.mysql.database.azure.com +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + +storage_profile_spec = dict( + storage_mb=dict( + type='int' + ), + backup_retention_days=dict( + type='int' + ), + geo_redundant_backup=dict( + type='str', + choices=['Disabled', 'Enabled'] + ), + storage_autogrow=dict( + type='str', + choices=['Disabled', 'Enabled'] + ) +) + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMMySqlServers(AzureRMModuleBase): + """Configuration class for an Azure RM MySQL Server resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + sku=dict( + type='dict' + ), + location=dict( + type='str' + ), + storage_profile=dict( + type='dict', + options=storage_profile_spec + ), + version=dict( + type='str', + choices=['5.7', '8.0'] + ), + enforce_ssl=dict( + type='bool', + default=False + ), + create_mode=dict( + type='str', + default='Default' + ), + admin_username=dict( + type='str' + ), + restarted=dict( + type='bool', + default=False + ), + admin_password=dict( + type='str', + no_log=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + self.tags = None + self.restarted = False + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMMySqlServers, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + 
"""Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "sku": + ev = kwargs[key] + if 'tier' in ev: + if ev['tier'] == 'basic': + ev['tier'] = 'Basic' + elif ev['tier'] == 'standard': + ev['tier'] = 'Standard' + self.parameters["sku"] = ev + elif key == "location": + self.parameters["location"] = kwargs[key] + elif key == "storage_profile": + self.parameters.setdefault("properties", {})["storage_profile"] = kwargs[key] + elif key == "version": + self.parameters.setdefault("properties", {})["version"] = kwargs[key] + elif key == "enforce_ssl": + self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled' + elif key == "create_mode": + self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key] + elif key == "admin_username": + self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key] + elif key == "admin_password": + self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_mysqlserver() + + if not old_response: + self.log("MySQL Server instance doesn't exist") + if self.restarted: + self.fail("Mysql server instance doesn't exist, can't be restart") + + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + if self.restarted: + self.restart_mysqlserver() + self.results['changed'] = True + self.results['state'] = old_response + return self.results + + self.log("MySQL Server instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if 
MySQL Server instance has to be deleted or may be updated") + update_tags, newtags = self.update_tags(old_response.get('tags', {})) + if update_tags: + self.tags = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the MySQL Server instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_mysqlserver() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("MySQL Server instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_mysqlserver() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_mysqlserver(): + time.sleep(20) + else: + self.log("MySQL Server instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["version"] = response["version"] + self.results["state"] = response["user_visible_state"] + self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"] + + return self.results + + def restart_mysqlserver(self): + ''' + Restart MySQL Server. + ''' + self.log("Restart MySQL Server instance {0}".format(self.name)) + + try: + response = self.mysql_client.servers.begin_restart(resource_group_name=self.resource_group, server_name=self.name) + except Exception as exc: + self.fail("Error restarting mysql server {0} - {1}".format(self.name, str(exc))) + return True + + def create_update_mysqlserver(self): + ''' + Creates or updates MySQL Server with the specified configuration. 
+ + :return: deserialized MySQL Server instance state dictionary + ''' + self.log("Creating / Updating the MySQL Server instance {0}".format(self.name)) + + try: + self.parameters['tags'] = self.tags + if self.to_do == Actions.Create: + response = self.mysql_client.servers.begin_create(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + else: + # structure of parameters for update must be changed + self.parameters.update(self.parameters.pop("properties", {})) + response = self.mysql_client.servers.begin_update(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the MySQL Server instance.') + self.fail("Error creating the MySQL Server instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_mysqlserver(self): + ''' + Deletes specified MySQL Server instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the MySQL Server instance {0}".format(self.name)) + try: + response = self.mysql_client.servers.begin_delete(resource_group_name=self.resource_group, + server_name=self.name) + except Exception as e: + self.log('Error attempting to delete the MySQL Server instance.') + self.fail("Error deleting the MySQL Server instance: {0}".format(str(e))) + + return True + + def get_mysqlserver(self): + ''' + Gets the properties of the specified MySQL Server. 
+ + :return: deserialized MySQL Server instance state dictionary + ''' + self.log("Checking if the MySQL Server instance {0} is present".format(self.name)) + found = False + try: + response = self.mysql_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("MySQL Server instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the MySQL Server instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMMySqlServers() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver_info.py new file mode 100644 index 000000000..503219cb8 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_mysqlserver_info.py @@ -0,0 +1,283 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_mysqlserver_info +version_added: "0.1.2" +short_description: Get Azure MySQL Server facts +description: + - Get facts of MySQL Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of MySQL Server + azure_rm_mysqlserver_info: + resource_group: myResourceGroup + name: server_name + tags: + - key + + - name: List instances of MySQL Server + azure_rm_mysqlserver_info: + resource_group: myResourceGroup +''' + +RETURN = ''' +servers: + description: + - A list of dictionaries containing facts for MySQL servers. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/myabdud1223 + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Resource name. + returned: always + type: str + sample: myabdud1223 + location: + description: + - The location the resource resides in. + returned: always + type: str + sample: eastus + sku: + description: + - The SKU of the server. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: GP_Gen4_2 + tier: + description: + - The tier of the particular SKU. + returned: always + type: str + sample: GeneralPurpose + capacity: + description: + - The scale capacity. + returned: always + type: int + sample: 2 + storage_profile: + description: + - Storage Profile properties of a server. + type: complex + returned: always + contains: + storage_mb: + description: + - The maximum storage allowed for a server. + returned: always + type: int + sample: 128000 + backup_retention_days: + description: + - Backup retention days for the server + returned: always + type: int + sample: 7 + geo_redundant_backup: + description: + - Enable Geo-redundant or not for server backup. 
class AzureRMMySqlServerInfo(AzureRMModuleBase):
    """Facts module: collect information about Azure MySQL servers."""

    def __init__(self):
        # User-supplied module arguments.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str'),
            tags=dict(type='list', elements='str')
        )
        # Accumulated module result.
        self.results = dict(changed=False)
        self.resource_group = None
        self.name = None
        self.tags = None
        super(AzureRMMySqlServerInfo, self).__init__(self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=False,
                                                     facts_module=True)

    def exec_module(self, **kwargs):
        """Dispatch to a single-server lookup or a resource-group listing."""
        if self.module._name == 'azure_rm_mysqlserver_facts':
            self.module.deprecate("The 'azure_rm_mysqlserver_facts' module has been renamed to 'azure_rm_mysqlserver_info'", version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.resource_group is not None and self.name is not None:
            self.results['servers'] = self.get()
        elif self.resource_group is not None:
            self.results['servers'] = self.list_by_resource_group()
        return self.results

    def get(self):
        """Return a single-element list for the named server, or []."""
        server = None
        try:
            server = self.mysql_client.servers.get(resource_group_name=self.resource_group,
                                                   server_name=self.name)
            self.log("Response : {0}".format(server))
        except ResourceNotFoundError:
            self.log('Could not get facts for MySQL Server.')

        if server and self.has_tags(server.tags, self.tags):
            return [self.format_item(server)]
        return []

    def list_by_resource_group(self):
        """Return facts for every server in the resource group matching the tag filter."""
        servers = None
        try:
            servers = self.mysql_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
            self.log("Response : {0}".format(servers))
        except Exception:
            self.log('Could not get facts for MySQL Servers.')

        if servers is None:
            return []
        return [self.format_item(server)
                for server in servers
                if self.has_tags(server.tags, self.tags)]

    def format_item(self, item):
        """Flatten an SDK server model into the documented fact dictionary."""
        raw = item.as_dict()
        return {
            'id': raw['id'],
            'resource_group': self.resource_group,
            'name': raw['name'],
            'sku': raw['sku'],
            'location': raw['location'],
            'storage_profile': raw['storage_profile'],
            'version': raw['version'],
            'enforce_ssl': raw['ssl_enforcement'] == 'Enabled',
            'admin_username': raw['administrator_login'],
            'user_visible_state': raw['user_visible_state'],
            'fully_qualified_domain_name': raw['fully_qualified_domain_name'],
            'tags': raw.get('tags')
        }


def main():
    AzureRMMySqlServerInfo()


if __name__ == '__main__':
    main()
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_natgateway.py new file mode 100644 index 000000000..e4dd46ade --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_natgateway.py @@ -0,0 +1,414 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 Andrea Decorte, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: azure_rm_natgateway +short_description: Manage Azure NAT Gateway instance +description: + - Create, update and delete instances of Azure NAT Gateway. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + name: + description: + - The name of the NAT Gateway. + required: True + type: str + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + type: str + zones: + description: + - List of Availability Zones in which this NAT Gateway should be located. + type: list + elements: int + choices: + - 1 + - 2 + - 3 + sku: + description: + - SKU of the NAT gateway resource. + type: dict + suboptions: + name: + description: + - Name of the NAT gateway SKU. Defaults to C(standard). + choices: + - 'standard' + default: 'standard' + type: str + idle_timeout_in_minutes: + description: + - The idle timeout in minutes which should be used. Defaults to 4. + default: 4 + type: int + public_ip_addresses: + description: + - A list of Public IP Addresses which should be associated with the NAT Gateway resource. + - Each element can be the name or resource id, or a dict contains C(name), C(resource_group) information of the IP address. + type: list + elements: str + state: + description: + - Assert the state of the NAT gateway. Use C(present) to create or update and C(absent) to delete. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Andrea Decorte (@andreadecorte) + +""" + +EXAMPLES = """ +- name: Create instance of NAT Gateway + azure_rm_natgateway: + resource_group: myResourceGroup + name: myNATGateway + public_ip_addresses: + - pip_name + +- name: Create instance of NAT Gateway + azure_rm_natgateway: + resource_group: myResourceGroup + name: myNATGateway + idle_timeout_in_minutes: 10 + location: eastus + zones: [ 1 ] + sku: + name: standard +""" + +RETURN = """ +id: + description: + - NAT gateway resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/natGateways/myNATGw +name: + description: + - Name of NAT gateway. + returned: always + type: str + sample: myNATGw +resource_group: + description: + - Name of resource group. + returned: always + type: str + sample: myResourceGroup +location: + description: + - Location of NAT gateway. 
class Actions:
    # Enumeration of what exec_module decided to do with the resource.
    NoAction, Create, Update, Delete = range(4)


sku_spec = dict(
    name=dict(type='str', choices=['standard'], default='standard')
)


class AzureRMNATGateways(AzureRMModuleBase):
    """Configuration class for an Azure RM NAT Gateway resource"""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            sku=dict(
                type='dict',
                options=sku_spec
            ),
            idle_timeout_in_minutes=dict(
                type='int',
                default=4
            ),
            zones=dict(
                type='list',
                elements='int',
                choices=[1, 2, 3]
            ),
            public_ip_addresses=dict(
                type='list',
                elements='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.name = None
        # Request body sent to the Azure API, built up in exec_module.
        self.parameters = dict()

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMNATGateways, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                 supports_check_mode=True, supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Bind known attributes directly; everything else feeds the API
        # request body. (A former "id" branch was unreachable -- "id" is not
        # in module_arg_spec -- and has been removed.)
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "location":
                    self.parameters["location"] = kwargs[key]
                elif key == "idle_timeout_in_minutes":
                    self.parameters["idle_timeout_in_minutes"] = kwargs[key]
                elif key == "zones":
                    self.parameters["zones"] = kwargs[key]
                elif key == "public_ip_addresses":
                    if "public_ip_addresses" not in self.parameters:
                        self.parameters["public_ip_addresses"] = []
                    for resource in kwargs[key]:
                        self.parameters["public_ip_addresses"].append({"id": self.return_resource_id(resource)})
                elif key == "sku":
                    ev = kwargs[key]
                    if "name" in ev:
                        # API expects the capitalized SKU name.
                        if ev["name"] == "standard":
                            ev["name"] = "Standard"
                    self.parameters["sku"] = ev

        old_response = None
        response = None

        resource_group = self.get_resource_group(self.resource_group)

        if "location" not in self.parameters:
            self.parameters["location"] = resource_group.location

        # sku can be null, define a default value
        if "sku" not in self.parameters:
            self.parameters["sku"] = {"name": "Standard"}

        old_response = self.get_natgateway()

        if not old_response:
            self.log("NAT Gateway instance doesn't exist")
            if self.state == "absent":
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("NAT Gateway instance already exists")
            if self.state == "absent":
                self.to_do = Actions.Delete
            elif self.state == "present":
                self.log("Need to check if NAT Gateway instance has to be deleted or may be updated")
                self.to_do = Actions.Update

        if self.to_do == Actions.Update:
            # Only update when a tracked property actually differs.
            if (self.parameters["location"] != old_response["location"] or
                    self.check_if_changed("zones", old_response) or
                    self.check_if_changed("idle_timeout_in_minutes", old_response) or
                    self.check_if_changed("public_ip_addresses", old_response) or
                    self.parameters["sku"]["name"] != old_response["sku"]["name"]):
                self.to_do = Actions.Update
            else:
                self.to_do = Actions.NoAction

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the NAT Gateway instance")

            if self.check_mode:
                self.results["changed"] = True
                self.results["parameters"] = self.parameters
                return self.results

            response = self.create_update_natgateway()

            if not old_response:
                self.results["changed"] = True
            else:
                self.results["changed"] = (old_response != response)
            self.log("Creation / Update done")

        elif self.to_do == Actions.Delete:
            self.log("NAT Gateway instance deleted")
            self.results["changed"] = True

            if self.check_mode:
                return self.results

            self.delete_natgateway()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_natgateway():
                time.sleep(20)
        else:
            self.log("NAT Gateway instance unchanged")
            self.results["changed"] = False
            response = old_response

        if response:
            self.results.update(self.format_response(response))

        return self.results

    def create_update_natgateway(self):
        """
        Creates or updates NAT Gateway with the specified configuration.

        :return: deserialized NAT Gateway instance state dictionary
        """
        self.log("Creating / Updating the NAT Gateway instance {0}".format(self.name))

        try:
            response = self.network_client.nat_gateways.begin_create_or_update(resource_group_name=self.resource_group,
                                                                               nat_gateway_name=self.name,
                                                                               parameters=self.parameters)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except Exception as exc:
            self.log("Error attempting to create the NAT Gateway instance.")
            self.fail("Error creating the NAT Gateway instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_natgateway(self):
        """
        Deletes specified NAT Gateway instance in the specified subscription and resource group.

        :return: True
        """
        self.log("Deleting the NAT Gateway instance {0}".format(self.name))
        try:
            # Fire-and-forget: exec_module polls get_natgateway() afterwards
            # to confirm the deletion completed.
            self.network_client.nat_gateways.begin_delete(resource_group_name=self.resource_group,
                                                          nat_gateway_name=self.name)
        except Exception as e:
            self.log("Error attempting to delete the NAT Gateway instance.")
            self.fail("Error deleting the NAT Gateway instance: {0}".format(str(e)))

        return True

    def get_natgateway(self):
        """
        Gets the properties of the specified NAT Gateway.

        :return: deserialized NAT Gateway instance state dictionary if it
                 exists, otherwise False
        """
        self.log("Checking if the NAT Gateway instance {0} is present".format(self.name))
        try:
            response = self.network_client.nat_gateways.get(resource_group_name=self.resource_group,
                                                            nat_gateway_name=self.name)
        except ResourceNotFoundError:
            self.log("Did not find the NAT Gateway instance.")
            return False
        self.log("Response : {0}".format(response))
        self.log("NAT Gateway instance : {0} found".format(response.name))
        return response.as_dict()

    def check_if_changed(self, parameter_name, old_response):
        """
        Compute if there is an update to the resource or not

        :return: True if resource is changed compared to the current one
        """
        if parameter_name in self.parameters and (parameter_name not in old_response or self.parameters[parameter_name] != old_response[parameter_name]):
            # Parameter changed
            return True
        elif parameter_name not in self.parameters and parameter_name in old_response:
            # Parameter omitted while it was specified before
            return True
        else:
            return False

    def format_response(self, natgw_dict):
        """
        Build format of the response

        :return dictionary filled with resource data
        """
        resource_id = natgw_dict.get("id")  # avoid shadowing builtin id()
        id_dict = parse_resource_id(resource_id)
        return {
            "id": resource_id,
            "name": natgw_dict.get("name"),
            "resource_group": id_dict.get("resource_group", self.resource_group),
            "location": natgw_dict.get("location")
        }

    def return_resource_id(self, resource):
        """
        Build an IP Address resource id from different inputs

        :return string containing the Azure id of the resource
        """
        if is_valid_resource_id(resource):
            return resource
        resource_dict = self.parse_resource_to_dict(resource)
        return format_resource_id(val=resource_dict["name"],
                                  subscription_id=resource_dict.get("subscription_id"),
                                  namespace="Microsoft.Network",
                                  types="publicIPAddresses",
                                  resource_group=resource_dict.get("resource_group"))


def main():
    """Main execution"""
    AzureRMNATGateways()


if __name__ == "__main__":
    main()
+ azure_rm_natgateway_info: + resource_group: MyResourceGroup + + - name: Get facts for all NAT gateways. + azure_rm_natgateway_info: +''' + +RETURN = ''' +gateways: + description: + - A list of dictionaries containing facts for a NAT gateway. + returned: always + type: list + elements: dict + contains: + id: + description: + - NAT gateway resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/natGateways/mynatgw + name: + description: + - Name of NAT gateway. + returned: always + type: str + sample: mynatgw + resource_group: + description: + - Name of resource group. + returned: always + type: str + sample: myResourceGroup + location: + description: + - Location of NAT gateway. + returned: always + type: str + sample: centralus + idle_timeout_in_minutes: + description: + - The idle timeout of the NAT gateway. + returned: always + type: int + sample: 4 + sku: + description: + - SKU of the NAT gateway. + returned: always + type: dict + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: Standard + zones: + description: + - Availability Zones of the NAT gateway. + returned: always + type: list + elements: str + public_ip_addresses: + description: + - List of ids of public IP addresses associated to the NAT Gateway. 
class AzureRMNATGatewayInfo(AzureRMModuleBase):
    """Facts module: retrieve NAT Gateway instances by name, resource group, or subscription-wide."""

    def __init__(self):
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
        )

        self.results = dict(
            changed=False,
        )

        self.name = None
        self.resource_group = None

        super(AzureRMNATGatewayInfo, self).__init__(self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=False,
                                                    facts_module=True)

    def exec_module(self, **kwargs):
        """Dispatch on the supplied filters: name lookup, group listing, or list-all."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name is not None:
            self.results["gateways"] = self.get()
        elif self.resource_group is not None:
            self.results["gateways"] = self.list_by_rg()
        else:
            self.results["gateways"] = self.list_all()

        return self.results

    def get(self):
        """Return a single-element list for the named gateway, or [] if absent."""
        response = None
        results = []
        try:
            response = self.network_client.nat_gateways.get(resource_group_name=self.resource_group, nat_gateway_name=self.name)
        except ResourceNotFoundError:
            pass

        if response is not None:
            results.append(self.format_response(response))

        return results

    def list_by_rg(self):
        """Return facts for every NAT gateway in the resource group."""
        response = None
        results = []
        try:
            response = self.network_client.nat_gateways.list(resource_group_name=self.resource_group)
        except Exception as exc:
            # Not every exception type carries request_id; accessing it
            # unconditionally raised AttributeError and masked the real error.
            request_id = getattr(exc, 'request_id', None) or ''
            self.fail("Error listing NAT gateways in resource groups {0}: {1} - {2}".format(self.resource_group, request_id, str(exc)))

        for item in response:
            results.append(self.format_response(item))

        return results

    def list_all(self):
        """Return facts for every NAT gateway in the subscription."""
        response = None
        results = []
        try:
            response = self.network_client.nat_gateways.list_all()
        except Exception as exc:
            # See list_by_rg: request_id may be missing on the exception.
            request_id = getattr(exc, 'request_id', None) or ''
            self.fail("Error listing all NAT gateways: {0} - {1}".format(request_id, str(exc)))

        for item in response:
            results.append(self.format_response(item))

        return results

    def format_response(self, natgw):
        """Flatten an SDK NAT gateway model into the documented fact dictionary."""
        raw = natgw.as_dict()
        resource_id = raw.get("id")  # avoid shadowing builtin id()
        id_dict = parse_resource_id(resource_id)
        return {
            "id": resource_id,
            "name": raw.get("name"),
            "resource_group": id_dict.get("resource_group", self.resource_group),
            "location": raw.get("location"),
            "sku": raw.get("sku"),
            "zones": raw.get("zones"),
            "idle_timeout_in_minutes": raw.get("idle_timeout_in_minutes"),
            "public_ip_addresses": raw.get("public_ip_addresses")
        }


def main():
    AzureRMNATGatewayInfo()


if __name__ == "__main__":
    main()
+ +options: + resource_group: + description: + - Name of a resource group where the network interface exists or will be created. + required: true + name: + description: + - Name of the network interface. + required: true + state: + description: + - Assert the state of the network interface. Use C(present) to create or update an interface and + C(absent) to delete an interface. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + virtual_network: + description: + - An existing virtual network with which the network interface will be associated. Required when creating a network interface. + - It can be the virtual network's name. + - Make sure your virtual network is in the same resource group as NIC when you give only the name. + - It can be the virtual network's resource id. + - It can be a dict which contains I(name) and I(resource_group) of the virtual network. + aliases: + - virtual_network_name + required: true + subnet_name: + description: + - Name of an existing subnet within the specified virtual network. Required when creating a network interface. + - Use the C(virtual_network)'s resource group. + aliases: + - subnet + required: true + os_type: + description: + - Determines any rules to be added to a default security group. + - When creating a network interface, if no security group name is provided, a default security group will be created. + - If the I(os_type=Windows), a rule allowing RDP access will be added. + - If the I(os_type=Linux), a rule allowing SSH access will be added. + choices: + - Windows + - Linux + default: Linux + private_ip_address: + description: + - (Deprecate) Valid IPv4 address that falls within the specified subnet. + - This option will be deprecated in 2.9, use I(ip_configurations) instead. + private_ip_allocation_method: + description: + - (Deprecate) Whether or not the assigned IP address is permanent. 
+ - When creating a network interface, if you specify I(private_ip_address=Static), you must provide a value for I(private_ip_address). + - You can update the allocation method to C(Static) after a dynamic private IP address has been assigned. + - This option will be deprecated in 2.9, use I(ip_configurations) instead. + default: Dynamic + choices: + - Dynamic + - Static + public_ip: + description: + - (Deprecate) When creating a network interface, if no public IP address name is provided a default public IP address will be created. + - Set to C(false) if you do not want a public IP address automatically created. + - This option will be deprecated in 2.9, use I(ip_configurations) instead. + type: bool + default: 'yes' + public_ip_address_name: + description: + - (Deprecate) Name of an existing public IP address object to associate with the security group. + - This option will be deprecated in 2.9, use I(ip_configurations) instead. + aliases: + - public_ip_address + - public_ip_name + public_ip_allocation_method: + description: + - (Deprecate) If a I(public_ip_address_name) is not provided, a default public IP address will be created. + - The allocation method determines whether or not the public IP address assigned to the network interface is permanent. + - This option will be deprecated in 2.9, use I(ip_configurations) instead. + choices: + - Dynamic + - Static + default: Dynamic + ip_configurations: + description: + - List of IP configurations. Each configuration object should include + field I(private_ip_address), I(private_ip_allocation_method), I(public_ip_address_name), I(public_ip), I(public_ip_allocation_method), I(name). + suboptions: + name: + description: + - Name of the IP configuration. + required: true + private_ip_address: + description: + - Private IP address for the IP configuration. + private_ip_address_version: + description: + - The version of the IP configuration. 
+ choices: + - IPv4 + - IPv6 + default: IPv4 + private_ip_allocation_method: + description: + - Private IP allocation method. + choices: + - Dynamic + - Static + default: Dynamic + public_ip_address_name: + description: + - Name of the public IP address. None for disable IP address. + aliases: + - public_ip_address + - public_ip_name + public_ip_allocation_method: + description: + - Public IP allocation method. + choices: + - Dynamic + - Static + default: Dynamic + load_balancer_backend_address_pools: + description: + - List of existing load-balancer backend address pools to associate with the network interface. + - Can be written as a resource ID. + - Also can be a dict of I(name) and I(load_balancer). + application_gateway_backend_address_pools: + description: + - List of existing application gateway backend address pools to associate with the network interface. + - Can be written as a resource ID. + - Also can be a dict of I(name) and I(application_gateway). + version_added: "1.10.0" + primary: + description: + - Whether the IP configuration is the primary one in the list. + - The first IP configuration default set to I(primary=True). + type: bool + default: False + application_security_groups: + description: + - List of application security groups in which the IP configuration is included. + - Element of the list could be a resource id of application security group, or dict of I(resource_group) and I(name). + enable_accelerated_networking: + description: + - Whether the network interface should be created with the accelerated networking feature or not. + type: bool + default: False + create_with_security_group: + description: + - Whether a security group should be be created with the NIC. + - If this flag set to C(True) and no I(security_group) set, a default security group will be created. + type: bool + default: True + security_group: + description: + - An existing security group with which to associate the network interface. 
+ - If not provided, a default security group will be created when I(create_with_security_group=true). + - It can be the name of security group. + - Make sure the security group is in the same resource group when you only give its name. + - It can be the resource id. + - It can be a dict contains security_group's I(name) and I(resource_group). + aliases: + - security_group_name + open_ports: + description: + - When a default security group is created for a Linux host a rule will be added allowing inbound TCP + connections to the default SSH port C(22), and for a Windows host rules will be added allowing inbound + access to RDP ports C(3389) and C(5986). Override the default ports by providing a list of open ports. + enable_ip_forwarding: + description: + - Whether to enable IP forwarding. + aliases: + - ip_forwarding + type: bool + default: False + dns_servers: + description: + - Which DNS servers should the NIC lookup. + - List of IP addresses. + type: list +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + - Yuwei Zhou (@yuwzho) +''' + +EXAMPLES = ''' + - name: Create a network interface with minimal parameters + azure_rm_networkinterface: + name: nic001 + resource_group: myResourceGroup + virtual_network: vnet001 + subnet_name: subnet001 + ip_configurations: + - name: ipconfig1 + public_ip_address_name: publicip001 + primary: True + + - name: Create a network interface with private IP address only (no Public IP) + azure_rm_networkinterface: + name: nic001 + resource_group: myResourceGroup + virtual_network: vnet001 + subnet_name: subnet001 + create_with_security_group: False + ip_configurations: + - name: ipconfig1 + primary: True + + - name: Create a network interface for use in a Windows host (opens RDP port) with custom RDP port + azure_rm_networkinterface: + name: nic002 + resource_group: myResourceGroup + virtual_network: vnet001 + 
subnet_name: subnet001 + os_type: Windows + rdp_port: 3399 + security_group: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurit + yGroups/nsg001" + ip_configurations: + - name: ipconfig1 + public_ip_address_name: publicip001 + primary: True + + - name: Create a network interface using existing security group and public IP + azure_rm_networkinterface: + name: nic003 + resource_group: myResourceGroup + virtual_network: vnet001 + subnet_name: subnet001 + security_group: secgroup001 + ip_configurations: + - name: ipconfig1 + public_ip_address_name: publicip001 + primary: True + + - name: Create a network with multiple ip configurations + azure_rm_networkinterface: + name: nic004 + resource_group: myResourceGroup + subnet_name: subnet001 + virtual_network: vnet001 + security_group: + name: testnic002 + resource_group: Testing1 + ip_configurations: + - name: ipconfig1 + public_ip_address_name: publicip001 + primary: True + - name: ipconfig2 + load_balancer_backend_address_pools: + - "{{ loadbalancer001.state.backend_address_pools[0].id }}" + - name: backendaddrpool1 + load_balancer: loadbalancer001 + + - name: Create network interface attached to application gateway backend address pool + azure_rm_networkinterface: + name: nic-appgw + resource_group: myResourceGroup + virtual_network: vnet001 + subnet_name: subnet001 + create_with_security_group: false + public_ip: false + ip_configurations: + - name: default + primary: true + application_gateway_backend_address_pools: + - name: myApplicationGatewayBackendAddressPool + application_gateway: myApplicationGateway + + - name: Create a network interface in accelerated networking mode + azure_rm_networkinterface: + name: nic005 + resource_group: myResourceGroup + virtual_network_name: vnet001 + subnet_name: subnet001 + enable_accelerated_networking: True + + - name: Create a network interface with IP forwarding + azure_rm_networkinterface: + name: 
nic001 + resource_group: myResourceGroup + virtual_network: vnet001 + subnet_name: subnet001 + ip_forwarding: True + ip_configurations: + - name: ipconfig1 + public_ip_address_name: publicip001 + primary: True + + - name: Create a network interface with dns servers + azure_rm_networkinterface: + name: nic009 + resource_group: myResourceGroup + virtual_network: vnet001 + subnet_name: subnet001 + dns_servers: + - 8.8.8.8 + + - name: Delete network interface + azure_rm_networkinterface: + resource_group: myResourceGroup + name: nic003 + state: absent +''' + +RETURN = ''' +state: + description: + - The current state of the network interface. + returned: always + type: complex + contains: + dns_server: + description: + - Which DNS servers should the NIC lookup. + - List of IP addresses. + type: list + sample: ['8.9.10.11', '7.8.9.10'] + dns_setting: + description: + - The DNS settings in network interface. + type: dict + sample: { + "applied_dns_servers": [], + "dns_servers": [ + "8.9.10.11", + "7.8.9.10" + ], + "internal_dns_name_label": null, + "internal_fqdn": null + } + enable_ip_forwarding: + description: + Whether to enable IP forwarding. + type: bool + sample: true + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + type: str + sample: 'W/"be115a43-2148-4545-a324-f33ad444c926"' + id: + description: + - Id of the network interface. + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/nic003" + enable_accelerated_networking: + description: + - Whether the network interface should be created with the accelerated networking feature or not. + type: bool + sample: true + ip_configurations: + description: + - List of IP configurations. + type: complex + contains: + name: + description: + - Name of the IP configuration. 
+ type: str + sample: default + load_balancer_backend_address_pools: + description: + - List of existing load-balancer backend address pools associated with the network interface. + type: list + application_gateway_backend_address_pools: + description: + - List of existing application gateway backend address pool resource IDs associated with the network interface. + type: list + version_added: "1.10.0" + sample: ["/subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationGateways/myGateway/ + backendAddressPools/myBackendAddressPool"] + private_ip_address: + description: + - Private IP address for the IP configuration. + type: str + sample: "10.1.0.10" + private_ip_address_version: + description: + - The version of the IP configuration. + type: str + sample: "IPv4" + private_ip_allocation_method: + description: + - Private IP allocation method. + type: str + sample: "Static" + public_ip_address: + description: + - Name of the public IP address. None for disable IP address. + type: dict + sample: { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresse + s/publicip001", + "name": "publicip001" + } + subnet: + description: + - The reference of the subnet resource. + type: dict + sample: { + "id": "/subscriptions/xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/ + myresourcegroup/providers/Microsoft.Network/virtualNetworks/tnb57dc95318/subnets/tnb57dc95318", + "name": "tnb57dc95318", + "resource_group": "myresourcegroup", + "virtual_network_name": "tnb57dc95318" + } + location: + description: + - The network interface resource location. + type: str + sample: eastus + mac_address: + description: + - The MAC address of the network interface. + type: str + name: + description: + - Name of the network interface. + type: str + sample: nic003 + network_security_group: + description: + - The reference of the network security group resource. 
+ type: dict + sample: { + "id": "/subscriptions//xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/ + myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/nsg001", + "name": "nsg001" + } + primary: + description: + - Get whether this is a primary network interface on virtual machine. + type: bool + sample: true + provisioning_state: + description: + - The provisioning state of the public IP resource. + type: str + sample: Succeeded + tags: + description: + -Tags of the network interface. + type: dict + sample: { 'key': 'value' } + type: + description: + - Type of the resource. + type: str + sample: "Microsoft.Network/networkInterfaces" +''' + +try: + from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import (AzureRMModuleBase, + azure_id_to_dict, + normalize_location_name, + format_resource_id + ) +from ansible.module_utils._text import to_native + + +def subnet_to_dict(subnet): + dic = azure_id_to_dict(subnet.id) + return dict( + id=subnet.id, + virtual_network_name=dic.get('virtualNetworks'), + resource_group=dic.get('resourceGroups'), + name=dic.get('subnets') + ) + + +def nic_to_dict(nic): + ip_configurations = [ + dict( + name=config.name, + private_ip_address=config.private_ip_address, + private_ip_address_version=config.private_ip_address_version, + private_ip_allocation_method=config.private_ip_allocation_method, + subnet=subnet_to_dict(config.subnet), + primary=config.primary if config.primary else False, + load_balancer_backend_address_pools=([item.id for item in config.load_balancer_backend_address_pools] + if config.load_balancer_backend_address_pools else None), + application_gateway_backend_address_pools=([item.id for item in config.application_gateway_backend_address_pools] + if 
config.application_gateway_backend_address_pools else None), + public_ip_address=dict( + id=config.public_ip_address.id, + name=azure_id_to_dict(config.public_ip_address.id).get('publicIPAddresses'), + public_ip_allocation_method=config.public_ip_address.public_ip_allocation_method + ) if config.public_ip_address else None, + application_security_groups=([asg.id for asg in config.application_security_groups] + if config.application_security_groups else None) + ) for config in nic.ip_configurations + ] + return dict( + id=nic.id, + name=nic.name, + type=nic.type, + location=nic.location, + tags=nic.tags, + network_security_group=dict( + id=nic.network_security_group.id, + name=azure_id_to_dict(nic.network_security_group.id).get('networkSecurityGroups') + ) if nic.network_security_group else None, + dns_settings=dict( + dns_servers=nic.dns_settings.dns_servers, + applied_dns_servers=nic.dns_settings.applied_dns_servers, + internal_dns_name_label=nic.dns_settings.internal_dns_name_label, + internal_fqdn=nic.dns_settings.internal_fqdn + ), + ip_configurations=ip_configurations, + ip_configuration=ip_configurations[0] if len(ip_configurations) == 1 else None, # for compatible issue, keep this field + mac_address=nic.mac_address, + enable_ip_forwarding=nic.enable_ip_forwarding, + provisioning_state=nic.provisioning_state, + etag=nic.etag, + enable_accelerated_networking=nic.enable_accelerated_networking, + dns_servers=nic.dns_settings.dns_servers, + ) + + +ip_configuration_spec = dict( + name=dict(type='str', required=True), + private_ip_address=dict(type='str'), + private_ip_address_version=dict(type='str', choices=['IPv4', 'IPv6'], default='IPv4'), + private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'), + public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']), + public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'), + 
load_balancer_backend_address_pools=dict(type='list'), + application_gateway_backend_address_pools=dict(type='list'), + primary=dict(type='bool', default=False), + application_security_groups=dict(type='list', elements='raw') +) + + +class AzureRMNetworkInterface(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + enable_accelerated_networking=dict(type='bool', default=False), + create_with_security_group=dict(type='bool', default=True), + security_group=dict(type='raw', aliases=['security_group_name']), + state=dict(default='present', choices=['present', 'absent']), + private_ip_address=dict(type='str'), + private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'), + public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']), + public_ip=dict(type='bool', default=True), + subnet_name=dict(type='str', aliases=['subnet']), + virtual_network=dict(type='raw', aliases=['virtual_network_name']), + public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'), + ip_configurations=dict(type='list', default=None, elements='dict', options=ip_configuration_spec), + os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'), + open_ports=dict(type='list'), + enable_ip_forwarding=dict(type='bool', aliases=['ip_forwarding'], default=False), + dns_servers=dict(type='list'), + ) + + required_if = [ + ('state', 'present', ['subnet_name', 'virtual_network']) + ] + + self.resource_group = None + self.name = None + self.location = None + self.create_with_security_group = None + self.enable_accelerated_networking = None + self.security_group = None + self.private_ip_address = None + self.private_ip_allocation_method = None + self.public_ip_address_name = None + self.public_ip = None + self.subnet_name = None + self.virtual_network 
= None + self.public_ip_allocation_method = None + self.state = None + self.tags = None + self.os_type = None + self.open_ports = None + self.enable_ip_forwarding = None + self.ip_configurations = None + self.dns_servers = None + + self.results = dict( + changed=False, + state=dict(), + ) + + super(AzureRMNetworkInterface, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + required_if=required_if) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + results = None + changed = False + nic = None + nsg = None + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + self.location = normalize_location_name(self.location) + + # parse the virtual network resource group and name + self.virtual_network = self.parse_resource_to_dict(self.virtual_network) + + # if not set the security group name, use nic name for default + self.security_group = self.parse_resource_to_dict(self.security_group or self.name) + + # if application security groups set, convert to resource id format + if self.ip_configurations: + for config in self.ip_configurations: + if config.get('application_security_groups'): + asgs = [] + for asg in config['application_security_groups']: + asg_resource_id = asg + if isinstance(asg, str) and (not is_valid_resource_id(asg)): + asg = self.parse_resource_to_dict(asg) + if isinstance(asg, dict): + asg_resource_id = format_resource_id(val=asg['name'], + subscription_id=self.subscription_id, + namespace='Microsoft.Network', + types='applicationSecurityGroups', + resource_group=asg['resource_group']) + asgs.append(asg_resource_id) + if len(asgs) > 0: + config['application_security_groups'] = asgs + + if self.state == 'present' and not self.ip_configurations: + # construct the ip_configurations array for compatible + self.deprecate('Setting 
ip_configuration flatten is deprecated and will be removed.' + ' Using ip_configurations list to define the ip configuration', version=(2, 9)) + self.ip_configurations = [ + dict( + private_ip_address=self.private_ip_address, + private_ip_allocation_method=self.private_ip_allocation_method, + public_ip_address_name=self.public_ip_address_name if self.public_ip else None, + public_ip_allocation_method=self.public_ip_allocation_method, + name='default', + primary=True + ) + ] + + try: + self.log('Fetching network interface {0}'.format(self.name)) + nic = self.network_client.network_interfaces.get(self.resource_group, self.name) + + self.log('Network interface {0} exists'.format(self.name)) + self.check_provisioning_state(nic, self.state) + results = nic_to_dict(nic) + self.log(results, pretty_print=True) + + nsg = None + if self.state == 'present': + # check for update + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + if self.create_with_security_group != bool(results.get('network_security_group')): + self.log("CHANGED: add or remove network interface {0} network security group".format(self.name)) + changed = True + + if self.enable_accelerated_networking != bool(results.get('enable_accelerated_networking')): + self.log("CHANGED: Accelerated Networking set to {0} (previously {1})".format( + self.enable_accelerated_networking, + results.get('enable_accelerated_networking'))) + changed = True + + if self.enable_ip_forwarding != bool(results.get('enable_ip_forwarding')): + self.log("CHANGED: IP forwarding set to {0} (previously {1})".format( + self.enable_ip_forwarding, + results.get('enable_ip_forwarding'))) + changed = True + + # We need to ensure that dns_servers are list like + dns_servers_res = results.get('dns_settings').get('dns_servers') + _dns_servers_set = sorted(self.dns_servers) if isinstance(self.dns_servers, list) else list() + _dns_servers_res = sorted(dns_servers_res) if isinstance(self.dns_servers, 
list) else list() + if _dns_servers_set != _dns_servers_res: + self.log("CHANGED: DNS servers set to {0} (previously {1})".format( + ", ".join(_dns_servers_set), + ", ".join(_dns_servers_res))) + changed = True + + if not changed: + nsg = self.get_security_group(self.security_group['resource_group'], self.security_group['name']) + if nsg and results.get('network_security_group') and results['network_security_group'].get('id') != nsg.id: + self.log("CHANGED: network interface {0} network security group".format(self.name)) + changed = True + + if results['ip_configurations'][0]['subnet']['virtual_network_name'] != self.virtual_network['name']: + self.log("CHANGED: network interface {0} virtual network name".format(self.name)) + changed = True + + if results['ip_configurations'][0]['subnet']['resource_group'] != self.virtual_network['resource_group']: + self.log("CHANGED: network interface {0} virtual network resource group".format(self.name)) + changed = True + + if results['ip_configurations'][0]['subnet']['name'] != self.subnet_name: + self.log("CHANGED: network interface {0} subnet name".format(self.name)) + changed = True + + # check the ip_configuration is changed + # construct two set with the same structure and then compare + # the list should contains: + # name, private_ip_address, public_ip_address_name, private_ip_allocation_method, subnet_name + ip_configuration_result = self.construct_ip_configuration_set(results['ip_configurations']) + ip_configuration_request = self.construct_ip_configuration_set(self.ip_configurations) + ip_configuration_result_name = [item['name'] for item in ip_configuration_result] + for item_request in ip_configuration_request: + if item_request['name'] not in ip_configuration_result_name: + changed = True + break + else: + for item_result in ip_configuration_result: + if len(ip_configuration_request) == 1 and len(ip_configuration_result) == 1: + item_request['primary'] = True + if item_request['name'] == item_result['name'] and 
item_request != item_result: + changed = True + break + + elif self.state == 'absent': + self.log("CHANGED: network interface {0} exists but requested state is 'absent'".format(self.name)) + changed = True + except ResourceNotFoundError: + self.log('Network interface {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: network interface {0} does not exist but requested state is 'present'".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + subnet = self.network_models.SubResource( + id='/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}'.format( + self.virtual_network['subscription_id'], + self.virtual_network['resource_group'], + self.virtual_network['name'], + self.subnet_name)) + + nic_ip_configurations = [ + self.network_models.NetworkInterfaceIPConfiguration( + private_ip_allocation_method=ip_config.get('private_ip_allocation_method'), + private_ip_address=ip_config.get('private_ip_address'), + private_ip_address_version=ip_config.get('private_ip_address_version'), + name=ip_config.get('name'), + subnet=subnet, + public_ip_address=self.get_or_create_public_ip_address(ip_config), + load_balancer_backend_address_pools=([self.network_models.BackendAddressPool(id=self.backend_addr_pool_id(bap_id)) + for bap_id in ip_config.get('load_balancer_backend_address_pools')] + if ip_config.get('load_balancer_backend_address_pools') else None), + application_gateway_backend_address_pools=([self.network_models.ApplicationGatewayBackendAddressPool + (id=self.gateway_backend_addr_pool_id(bap_id)) + for bap_id in ip_config.get('application_gateway_backend_address_pools')] + if ip_config.get('application_gateway_backend_address_pools') else None), + primary=ip_config.get('primary'), + 
application_security_groups=([self.network_models.ApplicationSecurityGroup(id=asg_id) + for asg_id in ip_config.get('application_security_groups')] + if ip_config.get('application_security_groups') else None) + ) for ip_config in self.ip_configurations + ] + + nsg = self.create_default_securitygroup(self.security_group['resource_group'], + self.location, + self.security_group['name'], + self.os_type, + self.open_ports) if self.create_with_security_group else None + + self.log('Creating or updating network interface {0}'.format(self.name)) + nic = self.network_models.NetworkInterface( + id=results['id'] if results else None, + location=self.location, + tags=self.tags, + ip_configurations=nic_ip_configurations, + enable_accelerated_networking=self.enable_accelerated_networking, + enable_ip_forwarding=self.enable_ip_forwarding, + network_security_group=nsg + ) + if self.dns_servers: + dns_settings = self.network_models.NetworkInterfaceDnsSettings( + dns_servers=self.dns_servers) + nic.dns_settings = dns_settings + self.results['state'] = self.create_or_update_nic(nic) + elif self.state == 'absent': + self.log('Deleting network interface {0}'.format(self.name)) + self.delete_nic() + # Delete doesn't return anything. 
If we get this far, assume success + self.results['state']['status'] = 'Deleted' + + return self.results + + def get_or_create_public_ip_address(self, ip_config): + name = ip_config.get('public_ip_address_name') + + if not (self.public_ip and name): + return None + + pip = self.get_public_ip_address(name) + if not pip: + params = self.network_models.PublicIPAddress( + location=self.location, + public_ip_allocation_method=ip_config.get('public_ip_allocation_method'), + ) + try: + poller = self.network_client.public_ip_addresses.begin_create_or_update(self.resource_group, name, params) + pip = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating {0} - {1}".format(name, str(exc))) + return pip + + def create_or_update_nic(self, nic): + try: + poller = self.network_client.network_interfaces.begin_create_or_update(self.resource_group, self.name, nic) + new_nic = self.get_poller_result(poller) + return nic_to_dict(new_nic) + except Exception as exc: + self.fail("Error creating or updating network interface {0} - {1}".format(self.name, str(exc))) + + def delete_nic(self): + try: + poller = self.network_client.network_interfaces.begin_delete(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting network interface {0} - {1}".format(self.name, str(exc))) + return True + + def get_public_ip_address(self, name): + self.log("Fetching public ip address {0}".format(name)) + try: + return self.network_client.public_ip_addresses.get(self.resource_group, name) + except ResourceNotFoundError as exc: + return None + + def get_security_group(self, resource_group, name): + self.log("Fetching security group {0}".format(name)) + try: + return self.network_client.network_security_groups.get(resource_group, name) + except ResourceNotFoundError as exc: + return None + + def backend_addr_pool_id(self, val): + if isinstance(val, dict): + lb = val.get('load_balancer', None) + name = val.get('name', 
None) + if lb and name: + return resource_id(subscription=self.subscription_id, + resource_group=self.resource_group, + namespace='Microsoft.Network', + type='loadBalancers', + name=lb, + child_type_1='backendAddressPools', + child_name_1=name) + return val + + def gateway_backend_addr_pool_id(self, val): + if isinstance(val, dict): + appgw = val.get('application_gateway', None) + name = val.get('name', None) + if appgw and name: + return resource_id(subscription=self.subscription_id, + resource_group=self.resource_group, + namespace='Microsoft.Network', + type='applicationGateways', + name=appgw, + child_type_1='backendAddressPools', + child_name_1=name) + return val + + def construct_ip_configuration_set(self, raw): + configurations = [dict( + private_ip_allocation_method=to_native(item.get('private_ip_allocation_method')), + public_ip_address_name=(to_native(item.get('public_ip_address').get('name')) + if item.get('public_ip_address') else to_native(item.get('public_ip_address_name'))), + primary=item.get('primary'), + load_balancer_backend_address_pools=(set([to_native(self.backend_addr_pool_id(id)) + for id in item.get('load_balancer_backend_address_pools')]) + if item.get('load_balancer_backend_address_pools') else None), + application_gateway_backend_address_pools=(set([to_native(self.gateway_backend_addr_pool_id(id)) + for id in item.get('application_gateway_backend_address_pools')]) + if item.get('application_gateway_backend_address_pools') else None), + application_security_groups=(set([to_native(asg_id) for asg_id in item.get('application_security_groups')]) + if item.get('application_security_groups') else None), + name=to_native(item.get('name')) + ) for item in raw] + return configurations + + +def main(): + AzureRMNetworkInterface() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface_info.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface_info.py new file mode 100644 index 000000000..10b55c6fc --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface_info.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_networkinterface_info + +version_added: "0.1.2" + +short_description: Get network interface facts + +description: + - Get facts for a specific network interface or all network interfaces within a resource group. + +options: + name: + description: + - Only show results for a specific network interface. + resource_group: + description: + - Name of the resource group containing the network interface(s). Required when searching by name. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Get facts for one network interface + azure_rm_networkinterface_info: + resource_group: myResourceGroup + name: nic001 + + - name: Get network interfaces within a resource group + azure_rm_networkinterface_info: + resource_group: myResourceGroup + + - name: Get network interfaces by tag + azure_rm_networkinterface_info: + resource_group: myResourceGroup + tags: + - testing + - foo:bar +''' + +RETURN = ''' +azure_networkinterfaces: + description: + - List of network interface dicts. 
+ returned: always + type: list + example: [{ + "dns_settings": { + "applied_dns_servers": [], + "dns_servers": [], + "internal_dns_name_label": null, + "internal_fqdn": null + }, + "enable_ip_forwarding": false, + "etag": 'W/"59726bfc-08c4-44ed-b900-f6a559876a9d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/nic003", + "ip_configuration": { + "name": "default", + "private_ip_address": "10.10.0.4", + "private_ip_allocation_method": "Dynamic", + "public_ip_address": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/publicip001", + "name": "publicip001" + }, + "subnet": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet001/subnets/subnet001", + "name": "subnet001", + "virtual_network_name": "vnet001" + } + }, + "location": "westus", + "mac_address": null, + "name": "nic003", + "network_security_group": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001", + "name": "secgroup001" + }, + "primary": null, + "provisioning_state": "Succeeded", + "tags": {}, + "type": "Microsoft.Network/networkInterfaces" + }] +networkinterfaces: + description: + - List of network interface dicts. Each dict contains parameters that can be passed to M(azure.azcollection.azure_rm_networkinterface) module. + type: list + returned: always + contains: + id: + description: + - Id of the network interface. + resource_group: + description: + - Name of a resource group where the network interface exists. + name: + description: + - Name of the network interface. + location: + description: + - Azure location. + virtual_network: + description: + - An existing virtual network with which the network interface will be associated. 
+ - It is a dict which contains I(name) and I(resource_group) of the virtual network. + subnet: + description: + - Name of an existing subnet within the specified virtual network. + tags: + description: + - Tags of the network interface. + ip_configurations: + description: + - List of IP configurations, if contains multiple configurations. + contains: + name: + description: + - Name of the IP configuration. + private_ip_address: + description: + - Private IP address for the IP configuration. + private_ip_allocation_method: + description: + - Private IP allocation method. + public_ip_address: + description: + - Name of the public IP address. None for disable IP address. + public_ip_allocation_method: + description: + - Public IP allocation method. + load_balancer_backend_address_pools: + description: + - List of existing load-balancer backend address pools associated with the network interface. + application_gateway_backend_address_pools: + description: + - List of existing application gateway backend address pools associated with the network interface. + version_added: "1.10.0" + primary: + description: + - Whether the IP configuration is the primary one in the list. + application_security_groups: + description: + - List of Application security groups. + sample: /subscriptions//resourceGroups//providers/Microsoft.Network/applicationSecurityGroups/myASG + enable_accelerated_networking: + description: + - Specifies whether the network interface should be created with the accelerated networking feature or not. + create_with_security_group: + description: + - Specifies whether a default security group should be created with the NIC. Only applies when creating a new NIC. + type: bool + security_group: + description: + - A security group resource ID with which to associate the network interface. + enable_ip_forwarding: + description: + - Whether to enable IP forwarding. + dns_servers: + description: + - Which DNS servers should the NIC lookup. + - List of IP addresses. 
+ mac_address: + description: + - The MAC address of the network interface. + provisioning_state: + description: + - The provisioning state of the network interface. + dns_settings: + description: + - The DNS settings in network interface. + contains: + dns_servers: + description: + - List of DNS servers IP addresses. + applied_dns_servers: + description: + - If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers + from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs. + internal_dns_name_label: + description: + - Relative DNS name for this NIC used for internal communications between VMs in the same virtual network. + internal_fqdn: + description: + - Fully qualified DNS name supporting internal communications between VMs in the same virtual network. +''' # NOQA +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict + + +AZURE_OBJECT_CLASS = 'NetworkInterface' + + +def nic_to_dict(nic): + ip_configurations = [ + dict( + name=config.name, + private_ip_address=config.private_ip_address, + private_ip_allocation_method=config.private_ip_allocation_method, + primary=config.primary if config.primary else False, + load_balancer_backend_address_pools=([item.id for item in config.load_balancer_backend_address_pools] + if config.load_balancer_backend_address_pools else None), + application_gateway_backend_address_pools=([item.id for item in config.application_gateway_backend_address_pools] + if config.application_gateway_backend_address_pools else None), + public_ip_address=config.public_ip_address.id if config.public_ip_address else None, + 
public_ip_allocation_method=config.public_ip_address.public_ip_allocation_method if config.public_ip_address else None, + application_security_groups=([asg.id for asg in config.application_security_groups] + if config.application_security_groups else None) + ) for config in nic.ip_configurations + ] + config = nic.ip_configurations[0] if len(nic.ip_configurations) > 0 else None + subnet_dict = azure_id_to_dict(config.subnet.id) if config and config.subnet else None + subnet = subnet_dict.get('subnets') if subnet_dict else None + virtual_network = dict( + resource_group=subnet_dict.get('resourceGroups'), + name=subnet_dict.get('virtualNetworks')) if subnet_dict else None + return dict( + id=nic.id, + resource_group=azure_id_to_dict(nic.id).get('resourceGroups'), + name=nic.name, + subnet=subnet, + virtual_network=virtual_network, + location=nic.location, + tags=nic.tags, + security_group=nic.network_security_group.id if nic.network_security_group else None, + dns_settings=dict( + dns_servers=nic.dns_settings.dns_servers, + applied_dns_servers=nic.dns_settings.applied_dns_servers, + internal_dns_name_label=nic.dns_settings.internal_dns_name_label, + internal_fqdn=nic.dns_settings.internal_fqdn + ), + ip_configurations=ip_configurations, + mac_address=nic.mac_address, + enable_ip_forwarding=nic.enable_ip_forwarding, + provisioning_state=nic.provisioning_state, + enable_accelerated_networking=nic.enable_accelerated_networking, + dns_servers=nic.dns_settings.dns_servers, + ) + + +class AzureRMNetworkInterfaceInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMNetworkInterfaceInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def 
exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_networkinterface_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_networkinterface_facts' module has been renamed to 'azure_rm_networkinterface_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + results = [] + + if self.name: + results = self.get_item() + elif self.resource_group: + results = self.list_resource_group() + else: + results = self.list_all() + + if is_old_facts: + self.results['ansible_facts'] = { + 'azure_networkinterfaces': self.serialize_nics(results) + } + self.results['networkinterfaces'] = self.to_dict_list(results) + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + try: + item = self.network_client.network_interfaces.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + return [item] if item and self.has_tags(item.tags, self.tags) else [] + + def list_resource_group(self): + self.log('List for resource group') + try: + response = self.network_client.network_interfaces.list(self.resource_group) + return [item for item in response if self.has_tags(item.tags, self.tags)] + except ResourceNotFoundError as exc: + self.fail("Error listing by resource group {0} - {1}".format(self.resource_group, str(exc))) + + def list_all(self): + self.log('List all') + try: + response = self.network_client.network_interfaces.list_all() + return [item for item in response if self.has_tags(item.tags, self.tags)] + except ResourceNotFoundError as exc: + self.fail("Error listing all - {0}".format(str(exc))) + + def serialize_nics(self, raws): + return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else [] + + def to_dict_list(self, raws): + return [nic_to_dict(item) for item in raws] if 
raws else [] + + +def main(): + AzureRMNetworkInterfaceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub.py new file mode 100644 index 000000000..53c07cc81 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub.py @@ -0,0 +1,390 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Praveen Ghuge (@praveenghuge), Karl Dasan (@karldas30) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_notificationhub +version_added: "1.7.0" +short_description: Manage Notification Hub +description: + - Create, update and delete instance of Notification Hub. +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + type: str + namespace_name: + description: + - Name of the namespace in which to create notification hub. + required: True + type: str + name: + description: + - Unique name of the Notification Hub. + type: str + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + type: str + sku: + description: + - The name of the SKU. + - Please see L(https://azure.microsoft.com/en-in/pricing/details/notification-hubs/,). + default: free + choices: + - free + - basic + - standard + type: str + state: + description: + - Assert the state of the Notification Hub. + - Use C(present) to create or update an notification hub and C(absent) to delete it. 
+ default: present + choices: + - absent + - present + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Praveen Ghuge (@praveenghuge) + - Karl Dasan (@karldas30) +''' +EXAMPLES = ''' + +- name: "Create Notification Hub" + azure_rm_notificationhub: + resource_group: testgroupans + location: eastus + namespace_name: myNamespace + name: myhub + tags: + - a: b + sku: free + +- name: Delete Notification Hub + azure_rm_notificationhub: + resource_group: testgroupans + name: myNamespace + state: absent + +- name: "Create Notification Hub Namespace" + azure_rm_notificationhub: + resource_group: testgroupans + location: eastus + namespace_name: myNamespace + tags: + - a: b + sku: free + +- name: Delete Notification Hub Namespace + azure_rm_notificationhub: + resource_group: testgroupans + namespace_name: myNamespace + state: absent + +''' + +RETURN = ''' +state: + description: + - Current state of the Notification Hub namesapce or Notification Hub. 
+ returned: always + type: dict + sample: { + "additional_properties": {}, + "critical": false, + "data_center": null, + "enabled": true, + "location": "eastus2", + "metric_id": null, + "name": "testnaedd3d22d3w", + "namespace_type": "NotificationHub", + "provisioning_state": "Succeeded", + "region": null, + "scale_unit": null, + "service_bus_endpoint": "https://testnaedd3d22d3w.servicebus.windows.net:443/", + "sku": { "name":"Free" }, + "tags": { + "a": "b" + }, + "type": "Microsoft.NotificationHubs/namespaces" + } +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.notificationhubs.models import NotificationHubCreateOrUpdateParameters, NamespaceCreateOrUpdateParameters + from azure.mgmt.notificationhubs.models import Sku +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureNotificationHub(AzureRMModuleBase): + + def __init__(self): + # define user inputs from playbook + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + namespace_name=dict(type='str', required=True), + name=dict(type='str'), + location=dict(type='str'), + sku=dict(type='str', choices=[ + 'free', 'basic', 'standard'], default='free'), + state=dict(choices=['present', 'absent'], + default='present', type='str'), + ) + + self.resource_group = None + self.namespace_name = None + self.name = None + self.sku = None + self.location = None + self.authorizations = None + self.tags = None + self.state = None + self.results = dict( + changed=False, + state=dict() + ) + + super(AzureNotificationHub, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + self.results['check_mode'] = self.check_mode + + # 
retrieve resource group to make sure it exists + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + results = dict() + changed = False + + try: + self.log( + 'Fetching Notification Hub Namespace {0}'.format(self.name)) + namespace = self.notification_hub_client.namespaces.get( + self.resource_group, self.namespace_name) + + results = namespace_to_dict(namespace) + if self.name: + self.log('Fetching Notification Hub {0}'.format(self.name)) + notification_hub = self.notification_hub_client.notification_hubs.get( + self.resource_group, self.namespace_name, self.name) + results = notification_hub_to_dict( + notification_hub) + # don't change anything if creating an existing namespace, but change if deleting it + if self.state == 'present': + changed = False + + update_tags, results['tags'] = self.update_tags( + results['tags']) + + if update_tags: + changed = True + elif self.namespace_name and not self.name: + if self.sku != results['sku']['name'].lower(): + changed = True + + elif self.state == 'absent': + changed = True + + except ResourceNotFoundError: + # the notification hub does not exist so create it + if self.state == 'present': + changed = True + else: + # you can't delete what is not there + changed = False + + self.results['changed'] = changed + if self.name and not changed: + self.results['state'] = results + + # return the results if your only gathering information + if self.check_mode: + return self.results + + if changed: + if self.state == "present": + if self.name is None: + self.results['state'] = self.create_or_update_namespaces() + elif self.namespace_name and self.name: + self.results['state'] = self.create_or_update_notification_hub() + elif self.state == "absent": + # delete Notification Hub + if self.name is None: + self.delete_namespace() + elif self.namespace_name and self.name: + self.delete_notification_hub() + 
self.results['state']['status'] = 'Deleted' + + return self.results + + def create_or_update_namespaces(self): + ''' + create or update namespaces + ''' + try: + namespace_params = NamespaceCreateOrUpdateParameters( + location=self.location, + namespace_type="NotificationHub", + sku=Sku(name=self.sku), + tags=self.tags + ) + result = self.notification_hub_client.namespaces.create_or_update( + self.resource_group, + self.namespace_name, + namespace_params) + + namespace = self.notification_hub_client.namespaces.get( + self.resource_group, + self.namespace_name) + + while namespace.status == "Created": + time.sleep(30) + namespace = self.notification_hub_client.namespaces.get( + self.resource_group, + self.namespace_name, + ) + except Exception as ex: + self.fail("Failed to create namespace {0} in resource group {1}: {2}".format( + self.namespace_name, self.resource_group, str(ex))) + return namespace_to_dict(result) + + def create_or_update_notification_hub(self): + ''' + Create or update Notification Hub. 
+ :return: create or update Notification Hub instance state dictionary + ''' + try: + response = self.create_or_update_namespaces() + params = NotificationHubCreateOrUpdateParameters( + location=self.location, + sku=Sku(name=self.sku), + tags=self.tags + ) + result = self.notification_hub_client.notification_hubs.create_or_update( + self.resource_group, + self.namespace_name, + self.name, + params) + self.log("Response : {0}".format(result)) + except Exception as ex: + self.fail("Failed to create notification hub {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + return notification_hub_to_dict(result) + + def delete_notification_hub(self): + ''' + Deletes specified notication hub + :return True + ''' + self.log("Deleting the notification hub {0}".format(self.name)) + try: + result = self.notification_hub_client.notification_hubs.delete( + self.resource_group, self.namespace_name, self.name) + except Exception as e: + self.log('Error attempting to delete notification hub.') + self.fail( + "Error deleting the notification hub : {0}".format(str(e))) + return True + + def delete_namespace(self): + ''' + Deletes specified namespace + :return True + ''' + self.log("Deleting the namespace {0}".format(self.namespace_name)) + try: + result = self.notification_hub_client.namespaces.begin_delete( + self.resource_group, self.namespace_name) + except Exception as e: + self.log('Error attempting to delete namespace.') + self.fail( + "Error deleting the namespace : {0}".format(str(e))) + return True + + +def notification_hub_to_dict(item): + # turn notification hub object into a dictionary (serialization) + notification_hub = item.as_dict() + result = dict( + additional_properties=notification_hub.get( + 'additional_properties', {}), + id=notification_hub.get('id', None), + name=notification_hub.get('name', None), + type=notification_hub.get('type', None), + location=notification_hub.get( + 'location', '').replace(' ', '').lower(), + 
tags=notification_hub.get('tags', None), + provisioning_state=notification_hub.get( + 'provisioning_state', None), + region=notification_hub.get('region', None), + metric_id=notification_hub.get('metric_id', None), + service_bus_endpoint=notification_hub.get( + 'service_bus_endpoint', None), + scale_unit=notification_hub.get('scale_unit', None), + enabled=notification_hub.get('enabled', None), + critical=notification_hub.get('critical', None), + data_center=notification_hub.get('data_center', None), + namespace_type=notification_hub.get('namespace_type', None) + ) + return result + + +def namespace_to_dict(item): + # turn notification hub namespace object into a dictionary (serialization) + namespace = item.as_dict() + result = dict( + additional_properties=namespace.get( + 'additional_properties', {}), + name=namespace.get('name', None), + type=namespace.get('type', None), + location=namespace.get( + 'location', '').replace(' ', '').lower(), + sku=namespace.get("sku"), + tags=namespace.get('tags', None), + provisioning_state=namespace.get( + 'provisioning_state', None), + region=namespace.get('region', None), + metric_id=namespace.get('metric_id', None), + service_bus_endpoint=namespace.get( + 'service_bus_endpoint', None), + scale_unit=namespace.get('scale_unit', None), + enabled=namespace.get('enabled', None), + critical=namespace.get('critical', None), + data_center=namespace.get('data_center', None), + namespace_type=namespace.get('namespace_type', None) + ) + return result + + +def main(): + AzureNotificationHub() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub_info.py new file mode 100644 index 000000000..5ab211c6d --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_notificationhub_info.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 
Praveen Ghuge (@praveenghuge), Karl Dasan (@karldas30) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_notificationhub_info +version_added: "1.7.0" +short_description: Get Azure Notification Hub +description: + - Get facts of Azure Notification Hub + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + namespace_name: + description: + - The name of the namspace. + type: str + name: + description: + - The name of the Notification hub. + type: str + + +extends_documentation_fragment: + - azure.azcollection.azure + + +author: + - Praveen Ghuge (@praveenghuge) + - Karl Dasan (@karldas30) +''' + + +EXAMPLES = ''' + - name: Get facts of specific notification hub + community.azure.azure_rm_notificationhub_info: + resource_group: myResourceGroup + name: myNotificationHub + +''' + +RETURN = ''' +state: + description: + - Current state of the Notification Hub namesapce or Notification Hub. 
+ returned: always + type: dict + sample: { + "additional_properties": {}, + "critical": false, + "data_center": null, + "enabled": true, + "location": "eastus2", + "metric_id": null, + "name": "testnaedd3d22d3w", + "namespace_type": "NotificationHub", + "provisioning_state": "Succeeded", + "region": null, + "scale_unit": null, + "service_bus_endpoint": "https://testnaedd3d22d3w.servicebus.windows.net:443/", + "sku": {"name":"Free"}, + "tags": { + "a": "b" + }, + "type": "Microsoft.NotificationHubs/namespaces" + } +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureNotificationHubInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + namespace_name=dict( + type='str' + ), + name=dict( + type='str', + ) + ) + # store the results of the module operation + self.results = dict( + changed=False) + self.resource_group = None + self.namespace_name = None + self.name = None + self.tags = None + + super(AzureNotificationHubInfo, self).__init__( + self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is None and self.namespace_name is None: + results = self.list_all_namespace() + self.results['namespaces'] = [ + self.namespace_to_dict(x) for x in results] + elif self.name and self.namespace_name: + results = self.get_notification_hub() + self.results['notificationhub'] = [ + self.notification_hub_to_dict(x) for x in results] + elif self.namespace_name: + results = self.get_namespace() + self.results['namespace'] = [ + self.namespace_to_dict(x) for x in results] + + return self.results + + def get_namespace(self): + response = None + results = [] + 
try: + response = self.notification_hub_client.namespaces.get( + self.resource_group, self.namespace_name) + self.log("Response : {0}".format(response)) + + except ResourceNotFoundError as e: + self.fail('Could not get info for namespace. {0}').format( + str(e)) + + if response and self.has_tags(response.tags, self.tags): + results = [response] + return results + + def get_notification_hub(self): + response = None + results = [] + try: + response = self.notification_hub_client.notification_hubs.get( + self.resource_group, self.namespace_name, self.name) + self.log("Response : {0}".format(response)) + + except ResourceNotFoundError as e: + self.fail('Could not get info for notification hub. {0}').format( + str(e)) + + if response and self.has_tags(response.tags, self.tags): + results = [response] + return results + + def list_all_namespace(self): + self.log('List items for resource group') + try: + response = self.notification_hub_client.namespaces.list( + self.resource_group) + + except Exception as exc: + self.fail( + "Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def namespace_to_dict(self, item): + # turn notification hub object into a dictionary (serialization) + namespace = item.as_dict() + result = dict( + additional_properties=namespace.get( + 'additional_properties', {}), + name=namespace.get('name', None), + type=namespace.get('type', None), + location=namespace.get( + 'location', '').replace(' ', '').lower(), + sku=namespace.get("sku"), + tags=namespace.get('tags', None), + provisioning_state=namespace.get( + 'provisioning_state', None), + region=namespace.get('region', None), + metric_id=namespace.get('metric_id', None), + service_bus_endpoint=namespace.get( + 'service_bus_endpoint', None), + scale_unit=namespace.get('scale_unit', None), + enabled=namespace.get('enabled', None), + 
critical=namespace.get('critical', None), + data_center=namespace.get('data_center', None), + namespace_type=namespace.get('namespace_type', None) + ) + return result + + def notification_hub_to_dict(self, item): + # turn notification hub object into a dictionary (serialization) + notification_hub = item.as_dict() + result = dict( + additional_properties=notification_hub.get( + 'additional_properties', {}), + name=notification_hub.get('name', None), + type=notification_hub.get('type', None), + location=notification_hub.get( + 'location', '').replace(' ', '').lower(), + tags=notification_hub.get('tags', None), + name_properties_name=notification_hub.get( + 'name_properties_name', None), + registration_ttl=notification_hub.get('registration_ttl', None), + authorization_rules=notification_hub.get( + 'authorization_rules', None), + apns_credential=notification_hub.get( + 'apns_credential', None), + wns_credential=notification_hub.get('wns_credential', None), + gcm_credential=notification_hub.get('gcm_credential', None), + mpns_credential=notification_hub.get('mpns_credential', None), + adm_credential=notification_hub.get('adm_credential', None), + baidu_credential=notification_hub.get('baidu_credential', None) + ) + return result + + +def main(): + AzureNotificationHubInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_openshiftmanagedcluster.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_openshiftmanagedcluster.py new file mode 100644 index 000000000..cffee5ceb --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_openshiftmanagedcluster.py @@ -0,0 +1,855 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 haiyuazhang +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: 
azure_rm_openshiftmanagedcluster +version_added: '1.2.0' +short_description: Manage Azure Red Hat OpenShift Managed Cluster instance +description: + - Create, update and delete instance of Azure Red Hat OpenShift Managed Cluster instance. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + name: + description: + - Resource name. + required: true + type: str + location: + description: + - Resource location. + required: true + type: str + cluster_profile: + description: + - Configuration for OpenShift cluster. + type: dict + default: {} + suboptions: + pull_secret: + description: + - Pull secret for the cluster (immutable). + type: str + default: "" + domain: + description: + - The domain for the cluster (immutable). + type: str + cluster_resource_group_id: + description: + - The ID of the cluster resource group (immutable). + type: str + version: + description: + - The Openshift version (immutable). + type: str + service_principal_profile: + description: + - service principal. + type: dict + suboptions: + client_id: + description: + - Client ID of the service principal (immutable). + required: true + type: str + client_secret: + description: + - Client secret of the service principal (immutable). + required: true + type: str + network_profile: + description: + - Configuration for OpenShift networking (immutable). + type: dict + default: {'pod_cidr' : '10.128.0.0/14', 'service_cidr' : '172.30.0.0/16'} + suboptions: + pod_cidr: + description: + - CIDR for the OpenShift Pods (immutable). + type: str + service_cidr: + description: + - CIDR for OpenShift Services (immutable). + type: str + master_profile: + description: + - Configuration for OpenShift master VMs. + type: dict + suboptions: + vm_size: + description: + - Size of agent VMs (immutable). 
+ type: str + choices: + - Standard_D8s_v3 + - Standard_D16s_v3 + - Standard_D32s_v3 + subnet_id: + description: + - The Azure resource ID of the master subnet (immutable). + required: true + type: str + worker_profiles: + description: + - Configuration for OpenShift worker Vms. + type: list + suboptions: + name: + description: name of the worker profile (immutable). + type: str + required: true + choices: + - worker + vm_size: + description: + - The size of the worker Vms (immutable). + type: str + choices: + - Standard_D4s_v3 + - Standard_D8s_v3 + disk_size: + description: + - The disk size of the worker VMs in GB. Must be 128 or greater (immutable). + type: int + subnet_id: + description: + - The Azure resource ID of the worker subnet (immutable). + type: str + required: true + count: + description: + - The number of worker VMs. Must be between 3 and 20 (immutable). + type: int + api_server_profile: + description: + - API server configuration. + type: dict + suboptions: + visibility: + description: + - API server visibility. + type: str + default: Public + choices: + - Public + - Private + ip: + description: + - IP address of api server (immutable), only appears in response. + type: str + url: + description: + - Url of api server (immutable), only appears in response. + type: str + ingress_profiles: + description: + - Ingress profiles configuration. only one profile is supported at the current API version. + type: list + suboptions: + visibility: + description: + - Ingress visibility. + type: str + default: Public + choices: + - Public + - Private + name: + description: + - Name of the ingress (immutable). + type: str + default: default + choices: + - default + ip: + description: + - IP of the ingress (immutable), only appears in response. + type: str + provisioning_state: + description: + - The current deployment or provisioning state, which only appears in the response. + type: str + state: + description: + - Assert the state of the OpenShiftManagedCluster. 
+ - Use C(present) to create or update an OpenShiftManagedCluster and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Haiyuan Zhang (@haiyuazhang) +''' + +EXAMPLES = ''' + - name: Create openshift cluster + azure_rm_openshiftmanagedcluster: + resource_group: "myResourceGroup" + name: "myCluster" + location: "eastus" + cluster_profile: + cluster_resource_group_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/clusterResourceGroup" + domain: "mydomain" + service_principal_profile: + client_id: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + client_secret: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + network_profile: + pod_cidr: "10.128.0.0/14" + service_cidr: "172.30.0.0/16" + worker_profiles: + - vm_size : "Standard_D4s_v3" + subnet_id : "/subscriptions/xx-xx-xx-xx-xx/resourceGroups/myResourceGroup/Microsoft.Network/virtualNetworks/myVnet/subnets/worker" + disk_size : 128 + count : 3 + master_profile: + vm_size : "Standard_D8s_v3" + subnet_id: "/subscriptions/xx-xx-xx-xx-xx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/master" + - name: Delete OpenShift Managed Cluster + azure_rm_openshiftmanagedcluster: + resource_group: myResourceGroup + name: myCluster + location: eastus + state: absent +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus/providers/Microsoft.RedHatOpenShift/openShiftClusters/mycluster +name: + description: + - Resource name. + returned: always + type: str + sample: mycluster +type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.RedHatOpenShift/openShiftClusters +location: + description: + - Resource location. 
+ returned: always + type: str + sample: eatus +properties: + description: + - Properties of a OpenShift managed cluster. + returned: always + type: complex + sample: null + contains: + provisioningState: + description: + - The current deployment or provisioning state, which only appears in the response. + returned: always + type: str + sample: Creating + clusterProfile: + description: + - Configuration for Openshift cluster. + returned: always + type: complex + contains: + domain: + description: + - Domain for the cluster. + returned: always + type: str + sample: mycluster + version: + description: + - Openshift version. + returned: always + type: str + sample: 4.4.17 + resourceGroupId: + description: + - The ID of the cluster resource group. + returned: always + type: str + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus-cluster + servicePrincipalProfile: + description: + - Service principal. + type: complex + returned: always + contains: + clientId: + description: Client ID of the service principal. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx + networkProfile: + description: + - Configuration for OpenShift networking. + returned: always + type: complex + contains: + podCidr: + description: + - CIDR for the OpenShift Pods. + returned: always + type: str + sample: 10.128.0.0/14 + serviceCidr: + description: + - CIDR for OpenShift Services. + type: str + returned: always + sample: 172.30.0.0/16 + masterProfile: + description: + - Configuration for OpenShift master VMs. + returned: always + type: complex + contains: + vmSize: + description: + - Size of agent VMs (immutable). + type: str + returned: always + sample: Standard_D8s_v3 + subnetId: + description: + - The Azure resource ID of the master subnet (immutable). 
+ type: str + returned: always + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus/providers/Microsoft.Network/ + virtualNetworks/mycluster-vnet/subnets/mycluster-worker + workerProfiles: + description: + - Configuration of OpenShift cluster VMs. + returned: always + type: complex + contains: + name: + description: + - Unique name of the pool profile in the context of the subscription and resource group. + returned: always + type: str + sample: worker + count: + description: + - Number of agents (VMs) to host docker containers. + returned: always + type: int + sample: 3 + vmSize: + description: + - Size of agent VMs. + returned: always + type: str + sample: Standard_D4s_v3 + diskSizeGB: + description: + - disk size in GB. + returned: always + type: int + sample: 128 + subnetId: + description: + - Subnet ID for worker pool. + returned: always + type: str + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus/providers/Microsoft.Network/ + virtualNetworks/mycluster-vnet/subnets/mycluster-worker + ingressProfiles: + description: + - Ingress configruation. + returned: always + type: list + sample: [{"name": "default", "visibility": "Public"}, ] + apiserverProfile: + description: + - API server configuration. + returned: always + type: complex + contains: + visibility: + description: + - api server visibility. 
import time
import json
import random
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient
try:
    from msrestazure.azure_exceptions import CloudError
except ImportError:
    # This is handled in azure_rm_common.
    pass


class Actions:
    # Enum of the action exec_module decides to take on the resource.
    NoAction, Create, Update, Delete = range(4)


class AzureRMOpenShiftManagedClusters(AzureRMModuleBaseExt):
    """Create or delete an Azure Red Hat OpenShift managed cluster via the ARM REST API.

    Updates of an existing cluster are not supported yet; exec_module fails
    explicitly when state=present and the cluster already exists.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                updatable=False,
                disposition='resourceGroupName',
                required=True
            ),
            name=dict(
                type='str',
                updatable=False,
                disposition='resourceName',
                required=True
            ),
            location=dict(
                type='str',
                updatable=False,
                required=True,
                disposition='/'
            ),
            cluster_profile=dict(
                type='dict',
                disposition='/properties/clusterProfile',
                default=dict(),
                options=dict(
                    pull_secret=dict(
                        type='str',
                        no_log=True,
                        updatable=False,
                        disposition='pullSecret',
                        purgeIfNone=True
                    ),
                    cluster_resource_group_id=dict(
                        type='str',
                        updatable=False,
                        disposition='resourceGroupId',
                        purgeIfNone=True
                    ),
                    domain=dict(
                        type='str',
                        updatable=False,
                        disposition='domain',
                        purgeIfNone=True
                    ),
                    version=dict(
                        type='str',
                        updatable=False,
                        disposition='version',
                        purgeIfNone=True
                    )
                ),
            ),
            service_principal_profile=dict(
                type='dict',
                disposition='/properties/servicePrincipalProfile',
                options=dict(
                    client_id=dict(
                        type='str',
                        updatable=False,
                        disposition='clientId',
                        required=True
                    ),
                    client_secret=dict(
                        type='str',
                        no_log=True,
                        updatable=False,
                        disposition='clientSecret',
                        required=True
                    )
                )
            ),
            network_profile=dict(
                type='dict',
                disposition='/properties/networkProfile',
                options=dict(
                    pod_cidr=dict(
                        type='str',
                        updatable=False,
                        disposition='podCidr'
                    ),
                    service_cidr=dict(
                        type='str',
                        updatable=False,
                        disposition='serviceCidr'
                    )
                ),
                default=dict(
                    pod_cidr="10.128.0.0/14",
                    service_cidr="172.30.0.0/16"
                )
            ),
            master_profile=dict(
                type='dict',
                disposition='/properties/masterProfile',
                options=dict(
                    vm_size=dict(
                        type='str',
                        updatable=False,
                        disposition='vmSize',
                        choices=['Standard_D8s_v3',
                                 'Standard_D16s_v3',
                                 'Standard_D32s_v3'],
                        purgeIfNone=True
                    ),
                    subnet_id=dict(
                        type='str',
                        updatable=False,
                        disposition='subnetId',
                        required=True
                    )
                )
            ),
            worker_profiles=dict(
                type='list',
                elements='dict',
                disposition='/properties/workerProfiles',
                options=dict(
                    name=dict(
                        type='str',
                        disposition='name',
                        updatable=False,
                        required=True,
                        choices=['worker']
                    ),
                    count=dict(
                        type='int',
                        disposition='count',
                        updatable=False,
                        purgeIfNone=True
                    ),
                    vm_size=dict(
                        type='str',
                        disposition='vmSize',
                        updatable=False,
                        choices=['Standard_D4s_v3',
                                 'Standard_D8s_v3'],
                        purgeIfNone=True
                    ),
                    subnet_id=dict(
                        type='str',
                        disposition='subnetId',
                        updatable=False,
                        required=True
                    ),
                    disk_size=dict(
                        type='int',
                        disposition='diskSizeGB',
                        updatable=False,
                        purgeIfNone=True
                    )
                )
            ),
            api_server_profile=dict(
                type='dict',
                disposition='/properties/apiserverProfile',
                options=dict(
                    visibility=dict(
                        type='str',
                        disposition='visibility',
                        choices=['Public', 'Private'],
                        default='Public'
                    ),
                    url=dict(
                        type='str',
                        disposition='*',
                        updatable=False
                    ),
                    ip=dict(
                        type='str',
                        disposition='*',
                        updatable=False
                    )
                )
            ),
            ingress_profiles=dict(
                type='list',
                elements='dict',
                disposition='/properties/ingressProfiles',
                options=dict(
                    name=dict(
                        type='str',
                        disposition='name',
                        updatable=False,
                        choices=['default'],
                        default='default'
                    ),
                    visibility=dict(
                        type='str',
                        disposition='visibility',
                        updatable=False,
                        choices=['Public', 'Private'],
                        default='Public'
                    ),
                    ip=dict(
                        type='str',
                        disposition='*',
                        updatable=False
                    )
                )
            ),
            provisioning_state=dict(
                type='str',
                disposition='/properties/provisioningState'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.name = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        # PUT returns 200/201 synchronously or 202 for an async operation.
        self.status_code = [200, 201, 202]
        self.to_do = Actions.NoAction

        # Request body assembled from the arg spec via inflate_parameters().
        self.body = {}
        self.query_parameters = {'api-version': '2020-04-30'}
        self.header_parameters = {'Content-Type': 'application/json; charset=utf-8'}

        super(AzureRMOpenShiftManagedClusters, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=True)

    def exec_module(self, **kwargs):
        """Entry point: decide create/delete/no-op and return the result facts."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.body[key] = kwargs[key]

        self.inflate_parameters(self.module_arg_spec, self.body, 0)
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        self.url = ('/subscriptions/{0}'
                    '/resourceGroups/{1}'
                    '/providers/Microsoft.RedHatOpenShift'
                    '/openShiftClusters/{2}').format(self.subscription_id, self.resource_group, self.name)

        old_response = self.get_resource()

        if not old_response:
            self.log("OpenShiftManagedCluster instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log('OpenShiftManagedCluster instance already exists')
            if self.state == 'absent':
                self.to_do = Actions.Delete
            else:
                # Comparing/patching an existing cluster is not implemented.
                self.fail("module doesn't support cluster update yet")

        if self.to_do in (Actions.Create, Actions.Update):
            self.log('Need to Create / Update the OpenShiftManagedCluster instance')

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_resource()
            self.results['changed'] = True
            self.log('Creation / Update done')
        elif self.to_do == Actions.Delete:
            self.log('OpenShiftManagedCluster instance deleted')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_resource()

            # Some Azure resources linger for a while after DELETE returns;
            # poll until the instance is really gone so callers can re-create it.
            while self.get_resource():
                time.sleep(20)
        else:
            self.log('OpenShiftManagedCluster instance unchanged')
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            self.results["name"] = response["name"]
            self.results["type"] = response["type"]
            self.results["location"] = response["location"]
            self.results["properties"] = response["properties"]

        return self.results

    def create_update_resource(self):
        """PUT the cluster body; on create, validate required profiles and fill defaults first."""
        if self.to_do == Actions.Create:
            required_profile_for_creation = ["workerProfiles", "clusterProfile", "servicePrincipalProfile", "masterProfile"]

            if 'properties' not in self.body:
                self.fail('{0} are required for creating a openshift cluster'.format(
                    '[worker_profile, cluster_profile, service_principal_profile, master_profile]'))
            for profile in required_profile_for_creation:
                if profile not in self.body['properties']:
                    self.fail('{0} is required for creating a openshift cluster'.format(profile))

            self.set_default()

        try:
            response = self.mgmt_client.query(self.url,
                                              'PUT',
                                              self.query_parameters,
                                              self.header_parameters,
                                              self.body,
                                              self.status_code,
                                              600,
                                              30)
        except CloudError as exc:
            self.log('Error attempting to create the OpenShiftManagedCluster instance.')
            self.fail('Error creating the OpenShiftManagedCluster instance: {0}'
                      '\n{1}'.format(str(self.body), str(exc)))
        try:
            response = json.loads(response.text)
        except Exception:
            # The service occasionally returns a non-JSON body; surface it raw.
            response = {'text': response.text}

        return response

    def delete_resource(self):
        """DELETE the cluster; fails the module on service error."""
        try:
            self.mgmt_client.query(self.url,
                                   'DELETE',
                                   self.query_parameters,
                                   self.header_parameters,
                                   None,
                                   self.status_code,
                                   600,
                                   30)
        except CloudError as e:
            self.log('Error attempting to delete the OpenShiftManagedCluster instance.')
            self.fail('Error deleting the OpenShiftManagedCluster instance: {0}'.format(str(e)))

        return True

    def get_resource(self):
        """GET the cluster; return its parsed dict, or False when it does not exist."""
        found = False
        response = None
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            response = json.loads(response.text)
            found = True
            self.log("Response : {0}".format(response))
        except CloudError:
            self.log('Did not find the OpenShiftManagedCluster instance.')
        return response if found else False

    def random_id(self):
        # Azure rejects cluster domains whose first character is a digit
        # (fix per Mangirdas Judeikis, Red Hat Inc.), so draw the first
        # character from letters only and the remaining seven from
        # letters + digits.
        return (random.choice('abcdefghijklmnopqrstuvwxyz') +
                ''.join(random.choice('abcdefghijklmnopqrstuvwxyz1234567890') for _ in range(7)))

    def set_default(self):
        """Fill in the defaults the module guarantees before a create call."""
        if 'apiserverProfile' not in self.body['properties']:
            self.body['properties']['apiserverProfile'] = dict(visibility="Public")
        if 'ingressProfiles' not in self.body['properties']:
            self.body['properties']['ingressProfiles'] = [dict(visibility="Public", name="default")]
        else:
            # Hard-code the ingress profile name so users don't need to specify it.
            for profile in self.body['properties']['ingressProfiles']:
                profile['name'] = "default"
        worker = self.body['properties']['workerProfiles'][0]
        if 'name' not in worker:
            worker['name'] = 'worker'
        if 'vmSize' not in worker:
            worker['vmSize'] = "Standard_D4s_v3"
        if 'diskSizeGB' not in worker:
            worker['diskSizeGB'] = 128
        if 'vmSize' not in self.body['properties']['masterProfile']:
            self.body['properties']['masterProfile']['vmSize'] = "Standard_D8s_v3"
        cluster_profile = self.body['properties']['clusterProfile']
        if 'pullSecret' not in cluster_profile:
            cluster_profile['pullSecret'] = ''
        if 'resourceGroupId' not in cluster_profile:
            cluster_profile['resourceGroupId'] = "/subscriptions/" + self.subscription_id + "/resourceGroups/" + self.name + "-cluster"
        # Generate a random domain when it is unset, empty, or null.
        if not cluster_profile.get('domain'):
            cluster_profile['domain'] = self.random_id()


def main():
    AzureRMOpenShiftManagedClusters()


if __name__ == '__main__':
    main()
+ returned: always + type: str + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus/providers/Microsoft.RedHatOpenShift/openShiftClusters/mycluster +name: + description: + - Resource name. + returned: always + type: str + sample: mycluster +type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.RedHatOpenShift/openShiftClusters +location: + description: + - Resource location. + returned: always + type: str + sample: eatus +properties: + description: + - Properties of a OpenShift managed cluster. + returned: always + type: complex + sample: null + contains: + provisioningState: + description: + - The current deployment or provisioning state, which only appears in the response. + returned: always + type: str + sample: Creating + clusterProfile: + description: + - Configuration for Openshift cluster. + returned: always + type: complex + contains: + domain: + description: + - Domain for the cluster. + returned: always + type: str + sample: mycluster + version: + description: + - Openshift version. + returned: always + type: str + sample: 4.4.17 + resourceGroupId: + description: + - The ID of the cluster resource group. + returned: always + type: str + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus-cluster + servicePrincipalProfile: + description: + - Service principal. + type: complex + returned: always + contains: + clientId: + description: Client ID of the service principal. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxx + networkProfile: + description: + - Configuration for OpenShift networking. + returned: always + type: complex + contains: + podCidr: + description: + - CIDR for the OpenShift Pods. + returned: always + type: str + sample: 10.128.0.0/14 + serviceCidr: + description: + - CIDR for OpenShift Services. + type: str + returned: always + sample: 172.30.0.0/16 + masterProfile: + description: + - Configuration for OpenShift master VMs. 
+ returned: always + type: complex + contains: + vmSize: + description: + - Size of agent VMs (immutable). + type: str + returned: always + sample: Standard_D8s_v3 + subnetId: + description: + - The Azure resource ID of the master subnet (immutable). + type: str + returned: always + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus/providers/Microsoft.Network/ + virtualNetworks/mycluster-vnet/subnets/mycluster-worker + workerProfiles: + description: + - Configuration of OpenShift cluster VMs. + returned: always + type: complex + contains: + name: + description: + - Unique name of the pool profile in the context of the subscription and resource group. + returned: always + type: str + sample: worker + count: + description: + - Number of agents (VMs) to host docker containers. + returned: always + type: int + sample: 3 + vmSize: + description: + - Size of agent VMs. + returned: always + type: str + sample: Standard_D4s_v3 + diskSizeGB: + description: + - disk size in GB. + returned: always + type: int + sample: 128 + subnetId: + description: + - Subnet ID for worker pool. + returned: always + type: str + sample: /subscriptions/xx-xx-xx-xx/resourceGroups/mycluster-eastus/providers/Microsoft.Network/ + virtualNetworks/mycluster-vnet/subnets/mycluster-worker + ingressProfiles: + description: + - Ingress configruation. + returned: always + type: list + sample: [{"name": "default", "visibility": "Public"}, ] + apiserverProfile: + description: + - API server configuration. + returned: always + type: complex + contains: + visibility: + description: + - api server visibility. 
import json
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient
try:
    from msrestazure.azure_exceptions import CloudError
except ImportError:
    # This is handled in azure_rm_common.
    pass


class AzureRMOpenShiftManagedClustersInfo(AzureRMModuleBaseExt):
    """Gather facts for Azure Red Hat OpenShift managed clusters.

    Scope is selected by the supplied parameters: a single cluster
    (resource_group + name), all clusters in a resource group, or all
    clusters in the subscription.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            )
        )

        self.resource_group = None
        self.name = None

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200]

        self.query_parameters = {'api-version': '2020-04-30'}
        self.header_parameters = {'Content-Type': 'application/json; charset=utf-8'}

        super(AzureRMOpenShiftManagedClustersInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to get / list / listall based on the parameters given."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.resource_group is not None and self.name is not None:
            self.results['clusters'] = self.get()
        elif self.resource_group is not None:
            self.results['clusters'] = self.list()
        else:
            self.results['clusters'] = self.listall()
        return self.results

    def get(self):
        """Return the single named cluster as a dict, or {} when the GET fails."""
        results = {}
        self.url = ('/subscriptions/{0}'
                    '/resourceGroups/{1}'
                    '/providers/Microsoft.RedHatOpenShift'
                    '/openShiftClusters/{2}').format(self.subscription_id, self.resource_group, self.name)

        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError:
            self.log('Could not get info for OpenShiftManagedCluster.')
            return {}

        return self.format_item(results)

    def list(self):
        """Return the clusters of one resource group as a list of dicts ([] on failure)."""
        results = {}
        self.url = ('/subscriptions/{0}'
                    '/resourceGroups/{1}'
                    '/providers/Microsoft.RedHatOpenShift'
                    '/openShiftClusters').format(self.subscription_id, self.resource_group)

        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError:
            self.log('Could not list OpenShiftManagedCluster instances.')

        # .get() instead of ['value']: when the request failed, results is
        # still {} and indexing would raise KeyError instead of returning [].
        return [self.format_item(x) for x in results.get('value', [])]

    def listall(self):
        """Return the raw subscription-wide listing response ({} on failure)."""
        results = {}
        self.url = ('/subscriptions/{0}'
                    '/providers/Microsoft.RedHatOpenShift'
                    '/openShiftClusters').format(self.subscription_id)

        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError:
            self.log('Could not list OpenShiftManagedCluster instances.')
        # NOTE(review): unlike list(), this historically returns the raw API
        # payload (a dict with a 'value' key) rather than a list of items;
        # preserved for backward compatibility.
        return results

    def format_item(self, item):
        # Items are returned as-is; trimming to a curated subset of fields
        # would be a breaking change for existing consumers.
        return item


def main():
    AzureRMOpenShiftManagedClustersInfo()


if __name__ == '__main__':
    main()
try:
    from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
    from azure.core.exceptions import ResourceNotFoundError
    from azure.core.polling import LROPoller
except ImportError:
    # This is handled in azure_rm_common.
    pass


class Actions:
    # Enum of the action exec_module decides to take on the setting.
    NoAction, Create, Update, Delete = range(4)


class AzureRMPostgreSqlConfigurations(AzureRMModuleBase):
    """Update (state=present) or reset to default (state=absent) one Azure
    PostgreSQL server configuration setting."""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            value=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.server_name = None
        self.name = None
        self.value = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMPostgreSqlConfigurations, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                              supports_check_mode=True,
                                                              supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: compare current setting with the desired value and act."""
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        response = None
        old_response = self.get_configuration()

        if not old_response:
            self.log("Configuration instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Configuration instance already exists")
            # Only a user-overridden setting needs a reset; a setting already
            # at its system default is a no-op for state=absent.
            if self.state == 'absent' and old_response['source'] == 'user-override':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Configuration instance has to be deleted or may be updated")
                if self.value != old_response.get('value'):
                    self.to_do = Actions.Update

        if self.to_do in (Actions.Create, Actions.Update):
            self.log("Need to Create / Update the Configuration instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_configuration()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Configuration instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_configuration()
        else:
            self.log("Configuration instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_configuration(self):
        """Apply self.value to the setting (source='user-override') and wait for completion."""
        self.log("Creating / Updating the Configuration instance {0}".format(self.name))

        try:
            response = self.postgresql_client.configurations.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                    server_name=self.server_name,
                                                                                    configuration_name=self.name,
                                                                                    parameters=self.value,
                                                                                    source='user-override')
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.log('Error attempting to create the Configuration instance.')
            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_configuration(self):
        """Reset the setting to its server default (source='system-default')."""
        self.log("Deleting the Configuration instance {0}".format(self.name))
        try:
            response = self.postgresql_client.configurations.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                    server_name=self.server_name,
                                                                                    configuration_name=self.name,
                                                                                    source='system-default')
            # Wait for the long-running operation, matching
            # create_update_configuration; otherwise the module may report
            # success before the reset has actually finished.
            if isinstance(response, LROPoller):
                self.get_poller_result(response)
        except Exception as e:
            self.log('Error attempting to delete the Configuration instance.')
            self.fail("Error deleting the Configuration instance: {0}".format(str(e)))

        return True

    def get_configuration(self):
        """Return the current setting as a dict, or False when it does not exist."""
        self.log("Checking if the Configuration instance {0} is present".format(self.name))
        try:
            response = self.postgresql_client.configurations.get(resource_group_name=self.resource_group,
                                                                 server_name=self.server_name,
                                                                 configuration_name=self.name)
            self.log("Response : {0}".format(response))
            self.log("Configuration instance : {0} found".format(response.name))
            return response.as_dict()
        except ResourceNotFoundError:
            self.log('Did not find the Configuration instance.')
        return False


def main():
    """Main execution"""
    AzureRMPostgreSqlConfigurations()


if __name__ == '__main__':
    main()
+short_description: Get Azure PostgreSQL Configuration facts +description: + - Get facts of Azure PostgreSQL Configuration. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - Setting name. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get specific setting of PostgreSQL configuration + azure_rm_postgresqlconfiguration_info: + resource_group: myResourceGroup + server_name: testpostgresqlserver + name: deadlock_timeout + + - name: Get all settings of PostgreSQL Configuration + azure_rm_postgresqlconfiguration_info: + resource_group: myResourceGroup + server_name: testpostgresqlserver +''' + +RETURN = ''' +settings: + description: + - A list of dictionaries containing MySQL Server settings. + returned: always + type: complex + contains: + id: + description: + - Setting resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.DBforPostgreSQL/servers/testpostgresqlser + ver/configurations/deadlock_timeout" + name: + description: + - Setting name. + returned: always + type: str + sample: deadlock_timeout + value: + description: + - Setting value. + returned: always + type: raw + sample: 1000 + description: + description: + - Description of the configuration. + returned: always + type: str + sample: Deadlock timeout. + source: + description: + - Source of the configuration. 
+ returned: always + type: str + sample: system-default +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMPostgreSQLConfigurationInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMPostgreSQLConfigurationInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_postgresqlconfiguration_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_postgresqlconfiguration_facts' module has been renamed to 'azure_rm_postgresqlconfiguration_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['settings'] = self.get() + else: + self.results['settings'] = self.list_by_server() + return self.results + + def get(self): + ''' + Gets facts of the specified PostgreSQL Configuration. 
+ + :return: deserialized PostgreSQL Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.postgresql_client.configurations.get(resource_group_name=self.resource_group, + server_name=self.server_name, + configuration_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.fail('Could not get requested setting.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + ''' + Gets facts of the specified PostgreSQL Configuration. + + :return: deserialized PostgreSQL Configurationinstance state dictionary + ''' + response = None + results = [] + try: + response = self.postgresql_client.configurations.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail('Could not get settings for server.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'id': d['id'], + 'name': d['name'], + 'value': d['value'], + 'description': d['description'], + 'source': d['source'] + } + return d + + +def main(): + AzureRMPostgreSQLConfigurationInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase.py new file mode 100644 index 000000000..4c54e6db7 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_postgresqldatabase +version_added: "0.1.2" +short_description: Manage PostgreSQL Database instance +description: + - Create, update and delete instance of PostgreSQL Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database. + required: True + charset: + description: + - The charset of the database. Check PostgreSQL documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + collation: + description: + - The collation of the database. Check PostgreSQL documentation for possible values. + - This is only set on creation, use I(force_update) to recreate a database if the values don't match. + force_update: + description: + - When set to C(true), will delete and recreate the existing PostgreSQL database if any of the properties don't match what is set. + - When set to C(false), no change will occur to the database even if any of the properties do not match. + type: bool + default: 'no' + state: + description: + - Assert the state of the PostgreSQL database. Use C(present) to create or update a database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) PostgreSQL Database + azure_rm_postgresqldatabase: + resource_group: myResourceGroup + server_name: testserver + name: db1 +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroups/providers/Microsoft.DBforPostgreSQL/servers/testserve + r/databases/db1" +name: + description: + - Resource name. + returned: always + type: str + sample: db1 +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMPostgreSqlDatabases(AzureRMModuleBase): + """Configuration class for an Azure RM PostgreSQL Database resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + charset=dict( + type='str' + ), + collation=dict( + type='str' + ), + force_update=dict( + type='bool', + default=False + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.force_update = None + self.parameters = dict() + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMPostgreSqlDatabases, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "charset": + self.parameters["charset"] = kwargs[key] + elif key == "collation": 
+ self.parameters["collation"] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_postgresqldatabase() + + if not old_response: + self.log("PostgreSQL Database instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("PostgreSQL Database instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if PostgreSQL Database instance has to be deleted or may be updated") + if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']): + self.to_do = Actions.Update + if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']): + self.to_do = Actions.Update + if self.to_do == Actions.Update: + if self.force_update: + if not self.check_mode: + self.delete_postgresqldatabase() + else: + self.fail("Database properties cannot be updated without setting 'force_update' option") + self.to_do = Actions.NoAction + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the PostgreSQL Database instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_postgresqldatabase() + self.results['changed'] = True + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("PostgreSQL Database instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_postgresqldatabase() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_postgresqldatabase(): + time.sleep(20) + else: + self.log("PostgreSQL Database instance unchanged") + 
self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["name"] = response["name"] + + return self.results + + def create_update_postgresqldatabase(self): + ''' + Creates or updates PostgreSQL Database with the specified configuration. + + :return: deserialized PostgreSQL Database instance state dictionary + ''' + self.log("Creating / Updating the PostgreSQL Database instance {0}".format(self.name)) + + try: + response = self.postgresql_client.databases.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the PostgreSQL Database instance.') + self.fail("Error creating the PostgreSQL Database instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_postgresqldatabase(self): + ''' + Deletes specified PostgreSQL Database instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the PostgreSQL Database instance {0}".format(self.name)) + try: + response = self.postgresql_client.databases.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + except Exception as e: + self.log('Error attempting to delete the PostgreSQL Database instance.') + self.fail("Error deleting the PostgreSQL Database instance: {0}".format(str(e))) + + return True + + def get_postgresqldatabase(self): + ''' + Gets the properties of the specified PostgreSQL Database. 
+ + :return: deserialized PostgreSQL Database instance state dictionary + ''' + self.log("Checking if the PostgreSQL Database instance {0} is present".format(self.name)) + found = False + try: + response = self.postgresql_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("PostgreSQL Database instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the PostgreSQL Database instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMPostgreSqlDatabases() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase_info.py new file mode 100644 index 000000000..d70fbd54b --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqldatabase_info.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_postgresqldatabase_info +version_added: "0.1.2" +short_description: Get Azure PostgreSQL Database facts +description: + - Get facts of PostgreSQL Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the database. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of PostgreSQL Database + azure_rm_postgresqldatabase_info: + resource_group: myResourceGroup + server_name: server_name + name: database_name + + - name: List instances of PostgreSQL Database + azure_rm_postgresqldatabase_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +databases: + description: + - A list of dict results where the key is the name of the PostgreSQL Database and the values are the facts for that PostgreSQL Database. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/testser + ver/databases/db1" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: testrg + server_name: + description: + - Server name. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: db1 + charset: + description: + - The charset of the database. + returned: always + type: str + sample: UTF8 + collation: + description: + - The collation of the database. 
+ returned: always + type: str + sample: English_United States.1252 +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMPostgreSqlDatabasesInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMPostgreSqlDatabasesInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_postgresqldatabase_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_postgresqldatabase_facts' module has been renamed to 'azure_rm_postgresqldatabase_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.server_name is not None and + self.name is not None): + self.results['databases'] = self.get() + elif (self.resource_group is not None and + self.server_name is not None): + self.results['databases'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.postgresql_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Databases.') + + if response is not None: + results.append(self.format_item(response)) 
+ + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.postgresql_client.databases.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e))) + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'server_name': self.server_name, + 'name': d['name'], + 'charset': d['charset'], + 'collation': d['collation'] + } + return d + + +def main(): + AzureRMPostgreSqlDatabasesInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule.py new file mode 100644 index 000000000..4eb702e19 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule.py @@ -0,0 +1,271 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_postgresqlfirewallrule +version_added: "0.1.2" +short_description: Manage PostgreSQL firewall rule instance +description: + - Create, update and delete instance of PostgreSQL firewall rule. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. 
+ required: True + name: + description: + - The name of the PostgreSQL firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the PostgreSQL firewall rule. Must be IPv4 format. + end_ip_address: + description: + - The end IP address of the PostgreSQL firewall rule. Must be IPv4 format. + state: + description: + - Assert the state of the PostgreSQL firewall rule. Use C(present) to create or update a PostgreSQL firewall rule and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) PostgreSQL firewall rule + azure_rm_postgresqlfirewallrule: + resource_group: myResourceGroup + server_name: testserver + name: rule1 + start_ip_address: 10.0.0.16 + end_ip_address: 10.0.0.18 +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/testserver + /firewallRules/rule1" +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMPostgreSqlFirewallRules(AzureRMModuleBase): + """Configuration class for an Azure RM PostgreSQL firewall rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + 
type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict(changed=False) + self.state = None + self.parameters = dict() + self.to_do = Actions.NoAction + + super(AzureRMPostgreSqlFirewallRules, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + if key in ['start_ip_address', 'end_ip_address']: + self.parameters[key] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + old_response = self.get_firewallrule() + + if not old_response: + self.log("PostgreSQL firewall rule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("PostgreSQL firewall rule instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if PostgreSQL firewall rule instance has to be deleted or may be updated") + if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): + self.to_do = Actions.Update + if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the PostgreSQL firewall rule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_firewallrule() + + if not old_response: + self.results['changed'] = True + else: + 
self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("PostgreSQL firewall rule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_firewallrule() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_firewallrule(): + time.sleep(20) + else: + self.log("PostgreSQL firewall rule instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_firewallrule(self): + ''' + Creates or updates PostgreSQL firewall rule with the specified configuration. + + :return: deserialized PostgreSQL firewall rule instance state dictionary + ''' + self.log("Creating / Updating the PostgreSQL firewall rule instance {0}".format(self.name)) + + try: + response = self.postgresql_client.firewall_rules.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the PostgreSQL firewall rule instance.') + self.fail("Error creating the PostgreSQL firewall rule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_firewallrule(self): + ''' + Deletes specified PostgreSQL firewall rule instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the PostgreSQL firewall rule instance {0}".format(self.name)) + try: + response = self.postgresql_client.firewall_rules.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + except Exception as e: + self.log('Error attempting to delete the PostgreSQL firewall rule instance.') + self.fail("Error deleting the PostgreSQL firewall rule instance: {0}".format(str(e))) + + return True + + def get_firewallrule(self): + ''' + Gets the properties of the specified PostgreSQL firewall rule. + + :return: deserialized PostgreSQL firewall rule instance state dictionary + ''' + self.log("Checking if the PostgreSQL firewall rule instance {0} is present".format(self.name)) + found = False + try: + response = self.postgresql_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("PostgreSQL firewall rule instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the PostgreSQL firewall rule instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMPostgreSqlFirewallRules() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule_info.py new file mode 100644 index 000000000..578fe6576 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlfirewallrule_info.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function 
+__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_postgresqlfirewallrule_info +version_added: "0.1.2" +short_description: Get Azure PostgreSQL Firewall Rule facts +description: + - Get facts of Azure PostgreSQL Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the server firewall rule. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of PostgreSQL Firewall Rule + azure_rm_postgresqlfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name + name: firewall_rule_name + + - name: List instances of PostgreSQL Firewall Rule + azure_rm_postgresqlfirewallrule_info: + resource_group: myResourceGroup + server_name: server_name +''' + +RETURN = ''' +rules: + description: + - A list of dictionaries containing facts for PostgreSQL Firewall Rule. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforPostgreSQL/servers/testserver/fire + wallRules/rule1" + server_name: + description: + - The name of the server. + returned: always + type: str + sample: testserver + name: + description: + - Resource name. + returned: always + type: str + sample: rule1 + start_ip_address: + description: + - The start IP address of the PostgreSQL firewall rule. + returned: always + type: str + sample: 10.0.0.16 + end_ip_address: + description: + - The end IP address of the PostgreSQL firewall rule. 
+ returned: always + type: str + sample: 10.0.0.18 +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMPostgreSQLFirewallRulesInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMPostgreSQLFirewallRulesInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_postgresqlfirewallrule_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_postgresqlfirewallrule_facts' module has been renamed to 'azure_rm_postgresqlfirewallrule_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.name is not None): + self.results['rules'] = self.get() + else: + self.results['rules'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.postgresql_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = 
self.postgresql_client.firewall_rules.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for FirewallRules.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d['id'], + 'server_name': self.server_name, + 'name': d['name'], + 'start_ip_address': d['start_ip_address'], + 'end_ip_address': d['end_ip_address'] + } + return d + + +def main(): + AzureRMPostgreSQLFirewallRulesInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver.py new file mode 100644 index 000000000..524050565 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver.py @@ -0,0 +1,458 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_postgresqlserver +version_added: "0.1.2" +short_description: Manage PostgreSQL Server instance +description: + - Create, update and delete instance of PostgreSQL Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + required: True + type: str + sku: + description: + - The SKU (pricing tier) of the server. 
+ type: dict + suboptions: + name: + description: + - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8). + tier: + description: + - The tier of the particular SKU, for example C(Basic). + choices: + - Basic + - Standard + capacity: + description: + - The scale up/out capacity, representing server's compute units. + size: + description: + - The size code, to be interpreted by resource as appropriate. + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + type: str + storage_mb: + description: + - The maximum storage allowed for a server. + type: int + geo_redundant_backup: + description: + - Choose between locally redundant(default) or geo-redundant backup. This cannot be updated after first deployment + type: bool + default: False + backup_retention_days: + description: + - Backup retention period between 7 and 35 days. 7 days by default if not set + type: int + version: + description: + - Server version. + type: str + choices: + - '9.5' + - '9.6' + - '10' + - '11' + enforce_ssl: + description: + - Enable SSL enforcement. + type: bool + default: False + storage_autogrow: + description: + - Enable storage autogrow. + type: bool + default: False + admin_username: + description: + - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation). + type: str + admin_password: + description: + - The password of the administrator login. + type: str + create_mode: + description: + - Create mode of SQL Server. Blank (default), restore from geo redundant (geo_restore), or restore from point in time (point_in_time_restore). + type: str + default: default + choices: + - default + - geo_restore + - point_in_time_restore + source_server_id: + description: + - Id if the source server if I(create_mode=default). 
+ type: str + restore_point_in_time: + description: + - Restore point creation time (ISO8601 format), specifying the time to restore from. + - Required if I(create_mode=point_in_time_restore). + type: str + state: + description: + - Assert the state of the PostgreSQL server. Use C(present) to create or update a server and C(absent) to delete it. + default: present + type: str + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: myResourceGroup + name: testserver + sku: + name: B_Gen5_1 + tier: Basic + location: eastus + storage_mb: 1024 + enforce_ssl: True + storage_autogrow: True + admin_username: cloudsa + admin_password: password +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/mysqlsrv1b6dd89593 +version: + description: + - Server version. Possible values include C(9.5), C(9.6), C(10), C(11). + returned: always + type: str + sample: 9.6 +state: + description: + - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled). + returned: always + type: str + sample: Ready +fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. 
+ returned: always + type: str + sample: postgresqlsrv1b6dd89593.postgresql.database.azure.com +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMPostgreSqlServers(AzureRMModuleBase): + """Configuration class for an Azure RM PostgreSQL Server resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + sku=dict( + type='dict' + ), + location=dict( + type='str' + ), + storage_mb=dict( + type='int' + ), + geo_redundant_backup=dict( + type='bool', + default=False + ), + backup_retention_days=dict( + type='int', + ), + version=dict( + type='str', + choices=['9.5', '9.6', '10', '11'] + ), + enforce_ssl=dict( + type='bool', + default=False + ), + storage_autogrow=dict( + type='bool', + default=False + ), + create_mode=dict( + type='str', + default='default', + choices=['default', 'geo_restore', 'point_in_time_restore'] + ), + source_server_id=dict( + type='str' + ), + restore_point_in_time=dict( + type='str' + ), + admin_username=dict( + type='str' + ), + admin_password=dict( + type='str', + no_log=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = {'properties': {'create_mode': 'default'}} + self.tags = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMPostgreSqlServers, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): 
+ """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "sku": + ev = kwargs[key] + if 'tier' in ev: + if ev['tier'] == 'basic': + ev['tier'] = 'Basic' + elif ev['tier'] == 'standard': + ev['tier'] = 'Standard' + self.parameters["sku"] = ev + elif key == "location": + self.parameters["location"] = kwargs[key] + elif key == "storage_mb": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = \ + kwargs[key] + elif key == "storage_autogrow": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})[ + "storage_autogrow"] = ('Enabled' if kwargs[key] + else 'Disabled') + elif key == "geo_redundant_backup": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})[ + "geo_redundant_backup"] = \ + 'Enabled' if kwargs[key] else 'Disabled' + elif key == "backup_retention_days": + self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})[ + "backup_retention_days"] = kwargs[key] + elif key == "version": + self.parameters.setdefault("properties", {})["version"] = kwargs[key] + elif key == "enforce_ssl": + self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[ + key] else 'Disabled' + elif key == "create_mode": + if kwargs[key] == 'default': + self.parameters["properties"]["create_mode"] = 'Default' + elif kwargs[key] == 'point_in_time_restore': + self.parameters["properties"]["create_mode"] = 'PointInTimeRestore' + elif kwargs[key] == 'geo_restore': + self.parameters["properties"]["create_mode"] = 'GeoRestore' + elif key == "admin_username": + self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key] + elif key == "admin_password": + self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key] + elif key == "source_server_id": + 
self.parameters["properties"]["source_server_id"] = kwargs[key] + elif key == "restore_point_in_time": + self.parameters["properties"]["restore_point_in_time"] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_postgresqlserver() + + if not old_response: + self.log("PostgreSQL Server instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("PostgreSQL Server instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if PostgreSQL Server instance has to be deleted or may be updated") + update_tags, newtags = self.update_tags(old_response.get('tags', {})) + if update_tags: + self.tags = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the PostgreSQL Server instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_postgresqlserver() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("PostgreSQL Server instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_postgresqlserver() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_postgresqlserver(): + time.sleep(20) + else: + self.log("PostgreSQL Server instance unchanged") + self.results['changed'] = False + response = old_response + + if response: 
+ self.results["id"] = response["id"] + self.results["version"] = response["version"] + self.results["state"] = response["user_visible_state"] + self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"] + + return self.results + + def create_update_postgresqlserver(self): + ''' + Creates or updates PostgreSQL Server with the specified configuration. + + :return: deserialized PostgreSQL Server instance state dictionary + ''' + self.log("Creating / Updating the PostgreSQL Server instance {0}".format(self.name)) + + try: + self.parameters['tags'] = self.tags + if self.to_do == Actions.Create: + response = self.postgresql_client.servers.begin_create(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + else: + # structure of parameters for update must be changed + self.parameters.update(self.parameters.pop("properties", {})) + response = self.postgresql_client.servers.begin_update(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the PostgreSQL Server instance.') + self.fail("Error creating the PostgreSQL Server instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_postgresqlserver(self): + ''' + Deletes specified PostgreSQL Server instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the PostgreSQL Server instance {0}".format(self.name)) + try: + response = self.postgresql_client.servers.begin_delete(resource_group_name=self.resource_group, + server_name=self.name) + except Exception as e: + self.log('Error attempting to delete the PostgreSQL Server instance.') + self.fail("Error deleting the PostgreSQL Server instance: {0}".format(str(e))) + + return True + + def get_postgresqlserver(self): + ''' + Gets the properties of the specified PostgreSQL Server. + + :return: deserialized PostgreSQL Server instance state dictionary + ''' + self.log("Checking if the PostgreSQL Server instance {0} is present".format(self.name)) + found = False + try: + response = self.postgresql_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("PostgreSQL Server instance : {0} found".format(response.name)) + except ResourceNotFoundError as e: + self.log('Did not find the PostgreSQL Server instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMPostgreSqlServers() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver_info.py new file mode 100644 index 000000000..159148e34 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlserver_info.py @@ -0,0 +1,276 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_postgresqlserver_info +version_added: "0.1.2" +short_description: Get Azure PostgreSQL Server facts 
+description: + - Get facts of PostgreSQL Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + name: + description: + - The name of the server. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of PostgreSQL Server + azure_rm_postgresqlserver_info: + resource_group: myResourceGroup + name: server_name + + - name: List instances of PostgreSQL Server + azure_rm_postgresqlserver_info: + resource_group: myResourceGroup + tags: + - key +''' + +RETURN = ''' +servers: + description: + - A list of dictionaries containing facts for PostgreSQL servers. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/po + stgreabdud1223" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Resource name. + returned: always + type: str + sample: postgreabdud1223 + location: + description: + - The location the resource resides in. + returned: always + type: str + sample: eastus + sku: + description: + - The SKU of the server. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: GP_Gen4_2 + tier: + description: + - The tier of the particular SKU. + returned: always + type: str + sample: GeneralPurpose + capacity: + description: + - The scale capacity. 
+ returned: always + type: int + sample: 2 + storage_mb: + description: + - The maximum storage allowed for a server. + returned: always + type: int + sample: 128000 + enforce_ssl: + description: + - Enable SSL enforcement. + returned: always + type: bool + sample: False + admin_username: + description: + - The administrator's login name of a server. + returned: always + type: str + sample: serveradmin + version: + description: + - Server version. + returned: always + type: str + sample: "9.6" + user_visible_state: + description: + - A state of a server that is visible to user. + returned: always + type: str + sample: Ready + fully_qualified_domain_name: + description: + - The fully qualified domain name of a server. + returned: always + type: str + sample: postgreabdud1223.postgres.database.azure.com + backup_retention_days: + description: + - Backup retention period between 7 and 35 days. 7 days by default if not set. + returned: always + type: int + sample: 7 + geo_redundant_backup: + description: + - Choose between locally redundant(default) or geo-redundant backup. This cannot be updated after first deployment. + returned: always + type: str + sample: Disabled + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. 
    def exec_module(self, **kwargs):
        """Dispatch to the right lookup based on which parameters were supplied."""
        # When invoked under the legacy *_facts alias, emit a deprecation warning.
        is_old_facts = self.module._name == 'azure_rm_postgresqlserver_facts'
        if is_old_facts:
            # NOTE(review): 'version' is passed as a tuple here; AnsibleModule.deprecate()
            # expects a string version (and a collection_name for collections) -- confirm
            # against the supported ansible-core versions.
            self.module.deprecate("The 'azure_rm_postgresqlserver_facts' module has been renamed to 'azure_rm_postgresqlserver_info'", version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # name + resource_group -> single server; resource_group alone -> list the group.
        if (self.resource_group is not None and
                self.name is not None):
            self.results['servers'] = self.get()
        elif (self.resource_group is not None):
            self.results['servers'] = self.list_by_resource_group()
        return self.results
try: + response = self.postgresql_client.servers.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception as e: + self.log('Could not get facts for PostgreSQL Servers.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'id': d['id'], + 'resource_group': self.resource_group, + 'name': d['name'], + 'sku': d['sku'], + 'location': d['location'], + 'storage_mb': d['storage_profile']['storage_mb'], + 'storage_autogrow': (d['storage_profile']['storage_autogrow'] == 'Enabled'), + 'version': d['version'], + 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'), + 'admin_username': d['administrator_login'], + 'user_visible_state': d['user_visible_state'], + 'fully_qualified_domain_name': d['fully_qualified_domain_name'], + 'geo_redundant_backup': d['storage_profile']['geo_redundant_backup'], + 'backup_retention_days': d['storage_profile']['backup_retention_days'], + 'tags': d.get('tags') + } + + return d + + +def main(): + AzureRMPostgreSqlServersInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset.py new file mode 100644 index 000000000..ca998d1ff --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset.py @@ -0,0 +1,494 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Aparna Patil(@aparna-patil) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_privatednsrecordset + +version_added: "1.1.0" + +short_description: Create, delete and update 
Private DNS record sets and records + +description: + - Creates, deletes, and updates Private DNS records sets and records within an existing Azure Private DNS Zone. + +options: + resource_group: + description: + - Name of resource group. + required: true + type: str + zone_name: + description: + - Name of the existing Private DNS zone in which to manage the record set. + required: true + type: str + relative_name: + description: + - Relative name of the record set. + required: true + type: str + record_type: + description: + - The type of record set to create or delete. + choices: + - A + - AAAA + - CNAME + - MX + - PTR + - SOA + - SRV + - TXT + required: true + type: str + record_mode: + description: + - Whether existing record values not sent to the module should be purged. + default: purge + type: str + choices: + - append + - purge + state: + description: + - Assert the state of the record set. Use C(present) to create or update and C(absent) to delete. + default: present + type: str + choices: + - absent + - present + time_to_live: + description: + - Time to live of the record set in seconds. + default: 3600 + type: int + records: + description: + - List of records to be created depending on the type of record (set). + type: list + elements: dict + suboptions: + preference: + description: + - Used for creating an C(MX) record set/records. + type: int + priority: + description: + - Used for creating an C(SRV) record set/records. + type: int + weight: + description: + - Used for creating an C(SRV) record set/records. + type: int + port: + description: + - Used for creating an C(SRV) record set/records. + type: int + entry: + description: + - Primary data value for all record types. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@aparna-patil) +''' + +EXAMPLES = ''' + +- name: ensure an "A" record set with multiple records + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + relative_name: www + zone_name: testing.com + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + +- name: delete a record set + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + record_type: A + relative_name: www + zone_name: testing.com + state: absent + +- name: create multiple "A" record sets with multiple records + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + zone_name: testing.com + relative_name: "{{ item.name }}" + record_type: "{{ item.type }}" + records: "{{ item.records }}" + with_items: + - { name: 'servera', type: 'A', records: [ { entry: '10.10.10.20' }, { entry: '10.10.10.21' }] } + - { name: 'serverb', type: 'A', records: [ { entry: '10.10.10.30' }, { entry: '10.10.10.41' }] } + - { name: 'serverc', type: 'A', records: [ { entry: '10.10.10.40' }, { entry: '10.10.10.41' }] } + +- name: create SRV records in a new record set + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + relative_name: _sip._tcp.testing.com + zone_name: testing.com + time_to_live: 7200 + record_type: SRV + records: + - entry: sip.testing.com + priority: 20 + weight: 10 + port: 5060 + +- name: create PTR record in a new record set + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + relative_name: 192.168.100.101.in-addr.arpa + zone_name: testing.com + record_type: PTR + records: + - entry: servera.testing.com + +- name: create TXT record in a new record set + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + relative_name: mail.testing.com + zone_name: testing.com + record_type: TXT + records: + - entry: 'v=spf1 a -all' + +- name: Update SOA 
record + azure_rm_privatednsrecordset: + resource_group: myResourceGroup + relative_name: "@" + zone_name: testing.com + record_type: SOA + records: + - host: azureprivatedns.net + email: azureprivatedns-host99.example.com + serial_number: 1 + refresh_time: 3699 + retry_time: 399 + expire_time: 2419299 + minimum_ttl: 399 +''' + +RETURN = ''' +state: + description: + - Current state of the DNS record set. + returned: always + type: complex + contains: + id: + description: + - The DNS record set ID. + returned: always + type: str + sample: "/subscriptions/xxxx......xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/privateDnsZones/ + b57dc95985712e4523282.com/A/www" + name: + description: + - Relate name of the record set. + returned: always + type: str + sample: 'www' + fqdn: + description: + - Fully qualified domain name of the record set. + returned: always + type: str + sample: www.b57dc95985712e4523282.com + etag: + description: + - The etag of the record set. + returned: always + type: str + sample: 692c3e92-a618-46fc-aecd-8f888807cd6c + is_auto_registered: + description: + - Is the record set auto-registered in the Private DNS zone through a virtual network link. + returned: always + type: bool + sample: false + ttl: + description: + - The TTL(time-to-live) of the records in the records set. + returned: always + type: int + sample: 3600 + type: + description: + - The type of DNS record in this record set. + returned: always + type: str + sample: A + a_records: + description: + - A list of records in the record set. 
+ returned: always + type: list + elements: dict + sample: [ + { + "ipv4_address": "192.0.2.2" + }, + { + "ipv4_address": "192.0.2.4" + }, + { + "ipv4_address": "192.0.2.8" + } + ] +''' + +from ansible.module_utils.basic import _load_params +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +RECORD_ARGSPECS = dict( + A=dict( + ipv4_address=dict(type='str', required=True, aliases=['entry']) + ), + AAAA=dict( + ipv6_address=dict(type='str', required=True, aliases=['entry']) + ), + CNAME=dict( + cname=dict(type='str', required=True, aliases=['entry']) + ), + MX=dict( + preference=dict(type='int', required=True), + exchange=dict(type='str', required=True, aliases=['entry']) + ), + PTR=dict( + ptrdname=dict(type='str', required=True, aliases=['entry']) + ), + SRV=dict( + priority=dict(type='int', required=True), + port=dict(type='int', required=True), + weight=dict(type='int', required=True), + target=dict(type='str', required=True, aliases=['entry']) + ), + TXT=dict( + value=dict(type='list', required=True, aliases=['entry']) + ), + SOA=dict( + host=dict(type='str', aliases=['entry']), + email=dict(type='str'), + serial_number=dict(type='int'), + refresh_time=dict(type='int'), + retry_time=dict(type='int'), + expire_time=dict(type='int'), + minimum_ttl=dict(type='int') + ) +) + +RECORDSET_VALUE_MAP = dict( + A=dict(attrname='a_records', classobj='ARecord', is_list=True), + AAAA=dict(attrname='aaaa_records', classobj='AaaaRecord', is_list=True), + CNAME=dict(attrname='cname_record', classobj='CnameRecord', is_list=False), + MX=dict(attrname='mx_records', classobj='MxRecord', is_list=True), + NS=dict(attrname='ns_records', classobj='NsRecord', is_list=True), + PTR=dict(attrname='ptr_records', classobj='PtrRecord', is_list=True), + SRV=dict(attrname='srv_records', 
    def __init__(self):

        # NOTE(review): _load_params() is a private Ansible helper, presumably called
        # so the raw parameters are parsed before the first (skip_exec) base-class
        # pass below -- confirm it is still required on supported ansible-core.
        _load_params()
        # define user inputs from playbook
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            relative_name=dict(type='str', required=True),
            zone_name=dict(type='str', required=True),
            record_type=dict(choices=RECORD_ARGSPECS.keys(), required=True, type='str'),
            record_mode=dict(choices=['append', 'purge'], default='purge'),
            state=dict(choices=['present', 'absent'], default='present', type='str'),
            time_to_live=dict(type='int', default=3600),
            records=dict(type='list', elements='dict')
        )

        required_if = [
            ('state', 'present', ['records'])
        ]

        self.results = dict(
            changed=False
        )

        # First base-class pass: argument validation only (skip_exec=True), so the
        # parsed 'record_type' value is available before the module executes.
        super(AzureRMPrivateDNSRecordSet, self).__init__(self.module_arg_spec,
                                                         required_if=required_if,
                                                         supports_check_mode=True,
                                                         skip_exec=True)

        # check the subspec and metadata
        record_subspec = RECORD_ARGSPECS.get(self.module.params['record_type'])

        # patch the right record shape onto the argspec
        self.module_arg_spec['records']['options'] = record_subspec

        # Attributes populated from module parameters in exec_module().
        self.resource_group = None
        self.relative_name = None
        self.zone_name = None
        self.record_type = None
        self.record_mode = None
        self.state = None
        self.time_to_live = None
        self.records = None

        # Second base-class pass: re-validate against the record-type-specific
        # spec and actually run exec_module(). Statement order here is critical.
        super(AzureRMPrivateDNSRecordSet, self).__init__(self.module_arg_spec,
                                                         required_if=required_if,
                                                         supports_check_mode=True)
self.private_dns_client.private_zones.get(self.resource_group, self.zone_name) + if not zone: + self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name, + self.resource_group)) + + try: + self.log('Fetching Private DNS Record Set {0}'.format(self.relative_name)) + record_set = self.private_dns_client.record_sets.get(self.resource_group, + self.zone_name, + self.record_type, + self.relative_name) + self.results['state'] = self.recordset_to_dict(record_set) + except ResourceNotFoundError: + record_set = None + + record_type_metadata = RECORDSET_VALUE_MAP.get(self.record_type) + + if self.state == 'present': + # convert the input records to SDK objects + self.input_sdk_records = self.create_sdk_records(self.records, self.record_type) + + if not record_set: + changed = True + else: + # use recordset to get the type-specific records + server_records = getattr(record_set, record_type_metadata.get('attrname')) + + # Compare the input records to the server records + self.input_sdk_records, changed = self.records_changed(self.input_sdk_records, server_records) + + # check the top-level recordset properties + changed |= record_set.ttl != self.time_to_live + + self.results['changed'] |= changed + + elif self.state == 'absent': + if record_set: + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.results['changed']: + if self.state == 'present': + record_set_args = dict( + ttl=self.time_to_live + ) + + record_set_args[record_type_metadata['attrname']] = \ + self.input_sdk_records if record_type_metadata['is_list'] else self.input_sdk_records[0] + + record_set = self.private_dns_models.RecordSet(**record_set_args) + # create record set + self.results['state'] = self.create_or_update(record_set) + + elif self.state == 'absent': + # delete record set + self.delete_record_set() + + return self.results + + def create_or_update(self, record_set): + try: + # create the record set + record_set = \ + 
self.private_dns_client.record_sets.create_or_update(resource_group_name=self.resource_group, + private_zone_name=self.zone_name, + record_type=self.record_type, + relative_record_set_name=self.relative_name, + parameters=record_set) + return self.recordset_to_dict(record_set) + except Exception as exc: + self.fail("Error creating or updating dns record {0} - {1}".format(self.relative_name, + exc.message or str(exc))) + + def delete_record_set(self): + try: + # delete the record set + self.private_dns_client.record_sets.delete(resource_group_name=self.resource_group, + private_zone_name=self.zone_name, + relative_record_set_name=self.relative_name, + record_type=self.record_type) + except Exception as exc: + self.fail("Error deleting record set {0} - {1}".format(self.relative_name, exc.message or str(exc))) + return None + + def create_sdk_records(self, input_records, record_type): + record = RECORDSET_VALUE_MAP.get(record_type) + if not record: + self.fail('record type {0} is not supported now'.format(record_type)) + record_sdk_class = getattr(self.private_dns_models, record.get('classobj')) + return [record_sdk_class(**x) for x in input_records] + + def records_changed(self, input_records, server_records): + # ensure we're always comparing a list, even for the single-valued types + if not isinstance(server_records, list): + server_records = [server_records] + + input_set = set([self.module.jsonify(x.as_dict()) for x in input_records]) + server_set = set([self.module.jsonify(x.as_dict()) for x in server_records]) + + if self.record_mode == 'append': # only a difference if the server set is missing something from the input set + input_set = server_set.union(input_set) + + # non-append mode; any difference in the sets is a change + changed = input_set != server_set + + records = [self.module.from_json(x) for x in input_set] + return self.create_sdk_records(records, self.record_type), changed + + def recordset_to_dict(self, recordset): + result = recordset.as_dict() 
+ result['type'] = result['type'].strip('Microsoft.Network/privateDnsZones/') + return result + + +def main(): + AzureRMPrivateDNSRecordSet() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset_info.py new file mode 100644 index 000000000..d4c33f2fd --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednsrecordset_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Aparna Patil(@aparna-patil) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_privatednsrecordset_info + +version_added: "1.1.0" + +short_description: Get Private DNS Record Set facts + +description: + - Get facts for a specific DNS Record Set in a Private DNS Zone, or a specific type of DNS record in all zones or + one zone etc. + +options: + relative_name: + description: + - Only show results for a Record Set. + type: str + resource_group: + description: + - Limit results by resource group. Required when filtering by name or type. + type: str + zone_name: + description: + - Limit results by zones. Required when filtering by name or type. + type: str + record_type: + description: + - Limit record sets by record type. + type: str + top: + description: + - Limit the maximum number of record sets to return. 
+ type: int + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@aparna-patil) + +''' + +EXAMPLES = ''' +- name: Get facts for one record set in one Private DNS Zone + azure_rm_privatednsrecordset_info: + resource_group: myResourceGroup + zone_name: newzone.com + relative_name: servera + record_type: A +- name: Get facts for all Type A record sets in a Private DNS Zone + azure_rm_privatednsrecordset_info: + resource_group: myResourceGroup + zone_name: newzone.com + record_type: A +- name: Get all record sets in a Private DNS Zone + azure_rm_privatednsrecordset_info: + resource_group: myResourceGroup + zone_name: newzone.com +''' + +RETURN = ''' +dnsrecordsets: + description: + - Gets a list of recordsets dict in a Private DNS zone. + returned: always + type: list + elements: dict + sample: [ + { + "fqdn": "servera.newzone.com.", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/ + Microsoft.Network/privateDnsZones/newzone.com/A/servera", + "record_type": "A", + "records": [ + { + "ipv4_address": "10.10.10.10" + } + ], + "relative_name": "servera", + "time_to_live": 3600 + }, + { + "fqdn": "serverb.newzone.com.", + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/ + Microsoft.Network/privateDnsZones/newzone.com/A/serverb", + "record_type": "A", + "records": [ + { + "ipv4_address": "10.10.10.11" + } + ], + "relative_name": "serverb", + "time_to_live": 3600 + } + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'RecordSet' + + +RECORDSET_VALUE_MAP = dict( + A='a_records', + AAAA='aaaa_records', + CNAME='cname_record', + MX='mx_records', + PTR='ptr_records', + 
class AzureRMPrivateDNSRecordSetInfo(AzureRMModuleBase):
    """Gather facts about record sets in an Azure Private DNS zone."""

    def __init__(self):

        # define user inputs into argument
        self.module_arg_spec = dict(
            relative_name=dict(type='str'),
            resource_group=dict(type='str'),
            zone_name=dict(type='str'),
            record_type=dict(type='str'),
            top=dict(type='int')
        )

        # store the results of the module operation
        self.results = dict(
            changed=False,
        )

        self.relative_name = None
        self.resource_group = None
        self.zone_name = None
        self.record_type = None
        self.top = None

        super(AzureRMPrivateDNSRecordSetInfo, self).__init__(self.module_arg_spec, supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Entry point: select the listing strategy from the supplied filters."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # a non-positive 'top' means "no limit"
        if not self.top or self.top <= 0:
            self.top = None

        # create conditionals to catch errors when calling record facts
        if self.relative_name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name or record type.")
        if self.relative_name and not self.zone_name:
            self.fail("Parameter error: DNS Zone required when filtering by name or record type.")

        results = []
        # list the conditions for what to return based on input
        if self.relative_name is not None:
            # if there is a name listed, they want only facts about that specific Record Set itself
            results = self.get_item()
        elif self.record_type:
            # else, they just want all the record sets of a specific type
            results = self.list_type()
        elif self.zone_name:
            # if there is a zone name listed, then they want all the record sets in a zone
            results = self.list_zone()

        self.results['dnsrecordsets'] = self.curated_list(results)
        return self.results

    def get_item(self):
        """Fetch the single record set named by relative_name, or [] if absent."""
        self.log('Get properties for {0}'.format(self.relative_name))
        item = None

        # try to get information for specific Record Set
        try:
            item = self.private_dns_client.record_sets.get(self.resource_group,
                                                           self.zone_name,
                                                           self.record_type,
                                                           self.relative_name)
        except ResourceNotFoundError:
            pass

        # BUG FIX: previously this returned [item] unconditionally, so a missing
        # record set produced [None] and curated_list() crashed in
        # record_to_dict(None).  Return an empty list when nothing was found
        # (same guard the privatednszone_info module uses).
        return [item] if item else []

    def list_type(self):
        """All record sets of self.record_type in the zone (honours self.top)."""
        self.log('Lists the record sets of a specified type in a Private DNS zone')
        try:
            response = self.private_dns_client.record_sets.list_by_type(self.resource_group,
                                                                        self.zone_name,
                                                                        self.record_type,
                                                                        top=self.top)
        except Exception as exc:
            self.fail("Failed to list for record type {0} - {1}".format(self.record_type, str(exc)))

        results = []
        for item in response:
            results.append(item)
        return results

    def list_zone(self):
        """All record sets in the zone (honours self.top)."""
        self.log('Lists all record sets in a Private DNS zone')
        try:
            response = self.private_dns_client.record_sets.list(self.resource_group, self.zone_name, top=self.top)
        except Exception as exc:
            self.fail("Failed to list for zone {0} - {1}".format(self.zone_name, str(exc)))

        results = []
        for item in response:
            results.append(item)
        return results

    def curated_list(self, raws):
        """Serialize a list of RecordSet SDK objects into plain dicts."""
        return [self.record_to_dict(item) for item in raws] if raws else []

    def record_to_dict(self, record):
        """Flatten one RecordSet object into the documented return layout."""
        record_type = record.type[len('Microsoft.Network/privateDnsZones/'):]
        records = getattr(record, RECORDSET_VALUE_MAP.get(record_type))
        if records:
            # single-valued types (CNAME/SOA) come back as a bare object
            if not isinstance(records, list):
                records = [records]
        else:
            records = []
        return dict(
            id=record.id,
            relative_name=record.name,
            record_type=record_type,
            records=[x.as_dict() for x in records],
            time_to_live=record.ttl,
            fqdn=record.fqdn
        )


def main():
    AzureRMPrivateDNSRecordSetInfo()


if __name__ == '__main__':
    main()
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszone.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2020 Jose Angel Munoz, +# +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_privatednszone + +version_added: "0.0.1" + +short_description: Manage Azure private DNS zones + +description: + - Creates and deletes Azure private DNS zones. + +options: + resource_group: + description: + - Name of resource group. + type: str + required: true + name: + description: + - Name of the private DNS zone. + type: str + required: true + state: + description: + - Assert the state of the zone. Use C(present) to create or update and C(absent) to delete. + default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Jose Angel Munoz (@imjoseangel) +''' + +EXAMPLES = ''' + +- name: Create a private DNS zone + azure_rm_privatednszone: + resource_group: myResourceGroup + name: example.com + +- name: Delete a private DNS zone + azure_rm_privatednszone: + resource_group: myResourceGroup + name: example.com + state: absent + +''' + +RETURN = ''' +state: + description: + - Current state of the zone. 
class AzureRMPrivateDNSZone(AzureRMModuleBase):
    """Create, update (tags) or delete an Azure private DNS zone."""

    def __init__(self):

        # playbook inputs
        self.module_arg_spec = dict(resource_group=dict(type='str',
                                                        required=True),
                                    name=dict(type='str', required=True),
                                    state=dict(choices=['present', 'absent'],
                                               default='present',
                                               type='str'))

        # results skeleton returned to the caller
        self.results = dict(changed=False, state=dict())

        self.resource_group = None
        self.name = None
        self.state = None
        self.tags = None

        super(AzureRMPrivateDNSZone, self).__init__(self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested zone state against Azure."""
        zone = None
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        # fail early if the resource group does not exist
        self.get_resource_group(self.resource_group)

        changed = False
        results = dict()

        try:
            self.log('Fetching private DNS zone {0}'.format(self.name))
            zone = self.private_dns_client.private_zones.get(
                self.resource_group, self.name)
            results = zone_to_dict(zone)

            if self.state == 'present':
                # an existing zone only changes when its tags differ
                update_tags, results['tags'] = self.update_tags(results['tags'])
                changed = bool(update_tags)
            else:
                # zone exists and must be removed
                changed = True
        except ResourceNotFoundError:
            # zone is absent: create it for 'present'; nothing to delete for 'absent'
            changed = self.state == 'present'

        self.results['changed'] = changed
        self.results['state'] = results

        # in check mode, report what would happen without touching Azure
        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                payload = self.private_dns_models.PrivateZone(tags=self.tags,
                                                              location='global')
                self.results['state'] = self.create_or_update_zone(payload)
            else:
                self.delete_zone()
                # delete returns nothing; reaching here means it succeeded
                self.results['state']['status'] = 'Deleted'

        return self.results

    def create_or_update_zone(self, zone):
        """Push the PrivateZone object to Azure and return its serialized state."""
        try:
            outcome = self.private_dns_client.private_zones.begin_create_or_update(
                self.resource_group, self.name, zone)
            # long-running operations come back as a poller; wait them out
            if isinstance(outcome, LROPoller):
                outcome = self.get_poller_result(outcome)
        except Exception as exc:
            self.fail("Error creating or updating zone {0} - {1}".format(
                self.name, exc.message or str(exc)))
        return zone_to_dict(outcome)

    def delete_zone(self):
        """Delete the zone; returns the (empty) poller result on success."""
        try:
            poller = self.private_dns_client.private_zones.begin_delete(
                self.resource_group, self.name)
            outcome = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting zone {0} - {1}".format(
                self.name, exc.message or str(exc)))
        return outcome


def zone_to_dict(zone):
    """Serialize a PrivateZone SDK object into a plain dictionary."""
    return dict(
        id=zone.id,
        name=zone.name,
        number_of_record_sets=zone.number_of_record_sets,
        number_of_virtual_network_links=zone.number_of_virtual_network_links,
        number_of_virtual_network_links_with_registration=zone.number_of_virtual_network_links_with_registration,
        tags=zone.tags)


def main():
    AzureRMPrivateDNSZone()


if __name__ == '__main__':
    main()
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Jose Angel Munoz (@imjoseangel) + +''' + +EXAMPLES = ''' +- name: Get facts for one zone + azure_rm_privatednszone_info: + resource_group: myResourceGroup + name: foobar22 + +- name: Get facts for all zones in a resource group + azure_rm_privatednszone_info: + resource_group: myResourceGroup + +- name: Get facts for privatednszone with tags + azure_rm_privatednszone_info: + tags: + - testing + - foo:bar +''' + +RETURN = ''' +azure_privatednszones: + description: + - List of private zone dicts. + returned: always + type: list + example: [{ + "etag": "00000002-0000-0000-0dcb-df5776efd201", + "location": "global", + "properties": { + "maxNumberOfRecordSets": 5000, + "number_of_virtual_network_links": 0, + "number_of_virtual_network_links_with_registration": 0 + }, + "tags": {} + }] +privatednszones: + description: + - List of private zone dicts, which share the same layout as azure_rm_privatednszone module parameter. + returned: always + type: list + contains: + id: + description: + - id of the private DNS Zone. + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privatednszones/azure.com" + type: str + name: + description: + - name of the private DNS zone. + sample: azure.com + type: str + number_of_record_sets: + description: + - The current number of record sets in this private DNS zone. + type: int + sample: 2 + number_of_virtual_network_links: + description: + - The current number of network links in this private DNS zone. + type: int + sample: 0 + number_of_virtual_network_links_with_registration: + description: + - The current number of network links with registration in this private DNS zone. 
class AzurePrivateRMDNSZoneInfo(AzureRMModuleBase):
    """Collect facts about private DNS zones, optionally filtered by name/tags."""

    def __init__(self):

        # module argument definition
        self.module_arg_spec = dict(name=dict(type='str'),
                                    resource_group=dict(type='str'),
                                    tags=dict(type='list', elements='str'))

        # results skeleton returned to the caller
        self.results = dict(changed=False,
                            ansible_info=dict(azure_privatednszones=[]))

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzurePrivateRMDNSZoneInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Dispatch to the right listing strategy based on the supplied filters."""
        if self.module._name == 'azure_rm_privatednszone_facts':
            self.module.deprecate(
                "The 'azure_rm_privatednszone_facts' module has been renamed to 'azure_rm_privatednszone_info'",
                version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # a name filter only makes sense within a resource group
        if self.name and not self.resource_group:
            self.fail(
                "Parameter error: resource group required when filtering by name."
            )

        # pick the narrowest query the filters allow
        if self.name is not None:
            zones = self.get_item()
        elif self.resource_group:
            zones = self.list_resource_group()
        else:
            zones = self.list_items()

        self.results['ansible_info'][
            'azure_privatednszones'] = self.serialize_items(zones)
        self.results['privatednszones'] = self.curated_items(zones)

        return self.results

    def get_item(self):
        """Fetch one zone by name; [] when it is absent or its tags don't match."""
        self.log('Get properties for {0}'.format(self.name))
        found = None
        try:
            found = self.private_dns_client.private_zones.get(
                self.resource_group, self.name)
        except ResourceNotFoundError:
            pass

        if found and self.has_tags(found.tags, self.tags):
            return [found]
        return []

    def list_resource_group(self):
        """All zones in self.resource_group matching the tag filter."""
        self.log('List items for resource group')
        try:
            response = self.private_dns_client.private_zones.list_by_resource_group(
                self.resource_group)
        except Exception as exc:
            self.fail("Failed to list for resource group {0} - {1}".format(
                self.resource_group, str(exc)))

        return [zone for zone in response if self.has_tags(zone.tags, self.tags)]

    def list_items(self):
        """All zones in the subscription matching the tag filter."""
        self.log('List all items')
        try:
            response = self.private_dns_client.private_zones.list()
        except Exception as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))

        return [zone for zone in response if self.has_tags(zone.tags, self.tags)]

    def serialize_items(self, raws):
        """Serialize zones through the AzureRMModuleBase serializer."""
        return [self.serialize_obj(item, AZURE_OBJECT_CLASS)
                for item in raws] if raws else []

    def curated_items(self, raws):
        """Flatten zones into the documented 'privatednszones' layout."""
        return [self.zone_to_dict(item) for item in raws] if raws else []

    def zone_to_dict(self, zone):
        """Serialize one PrivateZone SDK object into a plain dict."""
        return dict(
            id=zone.id,
            name=zone.name,
            number_of_record_sets=zone.number_of_record_sets,
            number_of_virtual_network_links=zone.number_of_virtual_network_links,
            number_of_virtual_network_links_with_registration=zone.number_of_virtual_network_links_with_registration,
            tags=zone.tags)


def main():
    AzurePrivateRMDNSZoneInfo()


if __name__ == '__main__':
    main()
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@techcon65) +''' + +EXAMPLES = ''' +- name: Create a virtual network link + azure_rm_privatednszonelink: + resource_group: myResourceGroup + name: vnetlink1 + zone_name: privatezone.com + virtual_network: MyAzureVNet + state: present + +- name: Update virtual network link + azure_rm_privatednszonelink: + resource_group: myResourceGroup + name: vnetlink1 + zone_name: privatezone.com + virtual_network: MyAzureVNet + registration_enabled: true + state: present + tags: + key1: "value1" + +- name: Delete a virtual network link + azure_rm_privatednszonelink: + resource_group: myResourceGroup + name: vnetlink1 + zone_name: privatezone.com + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the Virtual network link. + returned: always + type: complex + contains: + id: + description: + - The Virtual network link ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/ + Microsoft.Network/privateDnsZones/privatezone.com/virtualNetworkLinks/vnetlink1" + name: + description: + - Virtual network link name. + returned: always + type: str + sample: 'vnetlink1' + location: + description: + - The Azure Region where the resource lives. + returned: always + type: str + sample: global + etag: + description: + - The etag of the virtual network link. + returned: always + type: str + sample: 692c3e92-a618-46fc-aecd-8f888807cd6c + tags: + description: + - Resource tags. + returned: always + type: list + sample: [{"key1": "value1"}] + virtual_network: + description: + - Reference to virtual network. 
class AzureRMVirtualNetworkLink(AzureRMModuleBase):
    """Create, update or delete a virtual network link on a private DNS zone."""

    def __init__(self):

        _load_params()
        # playbook inputs
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            zone_name=dict(type='str', required=True),
            virtual_network=dict(type='str'),
            state=dict(choices=['present', 'absent'], default='present', type='str'),
            registration_enabled=dict(type='bool', default=False)
        )

        # a virtual network reference is mandatory when creating/updating
        required_if = [
            ('state', 'present', ['virtual_network'])
        ]

        self.results = dict(
            changed=False,
            state=dict()
        )

        self.resource_group = None
        self.name = None
        self.zone_name = None
        self.virtual_network = None
        self.registration_enabled = None
        self.state = None
        self.tags = None
        self.log_path = None
        self.log_mode = None

        super(AzureRMVirtualNetworkLink, self).__init__(self.module_arg_spec,
                                                        required_if=required_if,
                                                        supports_tags=True,
                                                        supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Reconcile the requested link state against Azure."""
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        changed = False
        results = dict()

        # fail early if the resource group does not exist
        self.get_resource_group(self.resource_group)

        # normalise the virtual network reference into a full resource id
        if self.virtual_network:
            vnet = self.parse_resource_to_dict(self.virtual_network)
            self.virtual_network = format_resource_id(val=vnet['name'],
                                                      subscription_id=vnet['subscription_id'],
                                                      namespace='Microsoft.Network',
                                                      types='virtualNetworks',
                                                      resource_group=vnet['resource_group'])

        self.log('Fetching Private DNS zone {0}'.format(self.zone_name))
        zone = self.private_dns_client.private_zones.get(self.resource_group, self.zone_name)
        if not zone:
            self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name,
                                                                                     self.resource_group))

        try:
            self.log('Fetching Virtual network link {0}'.format(self.name))
            existing = self.private_dns_client.virtual_network_links.get(self.resource_group,
                                                                         self.zone_name,
                                                                         self.name)
            # serialize the live object so we can diff against the request
            results = self.vnetlink_to_dict(existing)
            if self.state == 'present':
                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True
                    self.tags = results['tags']
                if self.registration_enabled != results['registration_enabled']:
                    changed = True
                    results['registration_enabled'] = self.registration_enabled
            else:
                # link exists and state is absent
                changed = True

        except ResourceNotFoundError:
            # link is missing: create it for 'present', nothing to do for 'absent'
            changed = self.state == 'present'

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                # build and submit the new/updated link
                link = self.private_dns_models.VirtualNetworkLink(location='global',
                                                                  registration_enabled=self.registration_enabled)
                if self.virtual_network:
                    link.virtual_network = self.network_models.VirtualNetwork(id=self.virtual_network)
                if self.tags:
                    link.tags = self.tags
                self.results['state'] = self.create_or_update_network_link(link)

            else:
                # remove the link
                self.delete_network_link()
                self.results['state'] = 'Deleted'

        return self.results

    def create_or_update_network_link(self, virtual_network_link):
        """Create/update the link in Azure and return its serialized state."""
        try:
            response = self.private_dns_client.virtual_network_links.begin_create_or_update(
                resource_group_name=self.resource_group,
                private_zone_name=self.zone_name,
                virtual_network_link_name=self.name,
                parameters=virtual_network_link)
            # long-running operations come back as a poller; wait them out
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error creating or updating virtual network link {0} - {1}".format(self.name, str(exc)))
        return self.vnetlink_to_dict(response)

    def delete_network_link(self):
        """Delete the link; returns the poller result (None on success)."""
        try:
            response = self.private_dns_client.virtual_network_links.begin_delete(
                resource_group_name=self.resource_group,
                private_zone_name=self.zone_name,
                virtual_network_link_name=self.name)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error deleting virtual network link {0} - {1}".format(self.name, str(exc)))
        return response

    def vnetlink_to_dict(self, virtualnetworklink):
        """Serialize a VirtualNetworkLink; tags are taken from the live attribute."""
        serialized = virtualnetworklink.as_dict()
        serialized['tags'] = virtualnetworklink.tags
        return serialized


def main():
    AzureRMVirtualNetworkLink()


if __name__ == '__main__':
    main()
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszonelink_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszonelink_info.py new file mode 100644 index 000000000..5907ea701 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatednszonelink_info.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@techcon65) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_privatednszonelink_info + +version_added: "1.6.0" + +short_description: Get Virtual Network link facts for private DNS zone + +description: + - Get a specified virtual network link or all virtual network links facts for a Private DNS zone. + +options: + resource_group: + description: + - Name of resource group. + required: true + type: str + name: + description: + - The name of the virtual network link. + type: str + zone_name: + description: + - The name of the Private DNS zone. + required: true + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Aparna Patil (@techcon65) + +''' + +EXAMPLES = ''' +- name: Get facts for one virtual network link in private DNS zone + azure_rm_privatednszonelink_info: + resource_group: myResourceGroup + name: vnetlink1 + zone_name: privatezone.com + +- name: Get facts for all virtual network links in private DNS zone + azure_rm_privatednszonelink_info: + resource_group: myResourceGroup + zone_name: privatezone.com +''' + +RETURN = ''' +virtualnetworklinks: + description: + - Gets a list of virtual network links dict in a Private DNS zone. 
class AzureRMVirtualNetworkLinkInfo(AzureRMModuleBase):
    """Gather facts about virtual network links of a private DNS zone."""

    def __init__(self):

        # module argument definition
        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str', required=True),
            zone_name=dict(type='str', required=True),
            tags=dict(type='list', elements='str')
        )

        # results skeleton returned to the caller
        self.results = dict(
            changed=False
        )

        self.name = None
        self.resource_group = None
        self.zone_name = None
        self.tags = None
        self.log_path = None
        self.log_mode = None

        super(AzureRMVirtualNetworkLinkInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Return facts for one named link, or for every link in the zone."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # a name means a single-link lookup; otherwise list the whole zone
        links = self.get_item() if self.name is not None else self.list_items()

        self.results['virtualnetworklinks'] = self.curated_items(links)

        return self.results

    def get_item(self):
        """Fetch one link by name; [] when it is absent or its tags don't match."""
        self.log('Get properties for {0}'.format(self.name))
        found = None
        try:
            found = self.private_dns_client.virtual_network_links.get(self.resource_group,
                                                                      self.zone_name,
                                                                      self.name)
        except ResourceNotFoundError:
            pass

        if found and self.has_tags(found.tags, self.tags):
            return [found]
        return []

    def list_items(self):
        """All links in the zone matching the tag filter."""
        self.log('List all virtual network links for private DNS zone - {0}'.format(self.zone_name))
        try:
            response = self.private_dns_client.virtual_network_links.list(self.resource_group, self.zone_name)
        except Exception as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))

        return [link for link in response if self.has_tags(link.tags, self.tags)]

    def curated_items(self, raws):
        """Flatten links into the documented 'virtualnetworklinks' layout."""
        return [self.vnetlink_to_dict(item) for item in raws] if raws else []

    def vnetlink_to_dict(self, link):
        """Serialize one VirtualNetworkLink SDK object into a plain dict."""
        return dict(
            id=link.id,
            name=link.name,
            virtual_network=dict(id=link.virtual_network.id),
            registration_enabled=link.registration_enabled,
            tags=link.tags,
            virtual_network_link_state=link.virtual_network_link_state,
            provisioning_state=link.provisioning_state
        )


def main():
    AzureRMVirtualNetworkLinkInfo()


if __name__ == '__main__':
    main()
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_privateendpoint +version_added: "1.8.0" +short_description: Manage Azure private endpoint +description: + - Create, update or delete a private endpoint. +options: + resource_group: + description: + - Name of resource group. + required: true + type: str + location: + description: + - Valid Azure location. Defaults to location of the resource group. + type: str + name: + description: + - Name of the private endpoint. + required: true + type: str + subnet: + description: + - The ID of the subnet from which the private IP will be allocated. + - This parameter is required for create or update. + type: dict + suboptions: + id: + description: + - The ID of the subnet from which the private IP will be allocated. + type: str + private_link_service_connections: + description: + - A grouping of information about the connection to the remote resource. + - This parameter is required for create or update. + type: list + elements: dict + suboptions: + name: + description: + - The name of the resource that is unique within a resource group. + type: str + private_link_service_id: + description: + - The resource id of the private endpoint to connect to. + type: str + group_ids: + description: + - The ID(s) of the group(s) obtained from the remote resource that this private endpoint should connect to. + type: list + elements: str + state: + description: + - State of the virtual network. Use C(present) to create or update and C(absent) to delete. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Fred-sun (@Fred-sun) + +''' + +EXAMPLES = ''' +- name: Create private endpoint + azure_rm_privateendpoint: + name: testprivateendpoint + resource_group: v-xisuRG + private_link_service_connections: + - name: Test_private_link_service + private_link_service_id: /subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateLinkServices/testervice + subnet: + id: /subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/fredvnet/subnets/default + tags: + key1: value1 + key2: value2 + +- name: Delete private endpoint + azure_rm_privateendpoint: + name: testprivateendpoint + resource_group: myResourceGroup + state: absent +''' + + +RETURN = ''' +state: + description: + - List of private endpoint dict with same format as M(azure.azcollection.azure_rm_privateendpoint) module paramter. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the private endpoint. + sample: /subscriptions/xxx-xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/testprivateendpoint + returned: always + type: str + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + sample: 'W/\"20803842-7d51-46b2-a790-ded8971b4d8a' + returned: always + type: str + network_interfaces: + description: + - List ID of the network interfaces. + returned: always + type: list + sample: ["/subscriptions/xxx-xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/fredprivateendpoint002.nic"] + location: + description: + - Valid Azure location. + returned: always + type: str + sample: eastus + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. 
+ returned: always + type: dict + sample: { "tag1": "abc" } + provisioning_state: + description: + - Provisioning state of the resource. + returned: always + sample: Succeeded + type: str + name: + description: + - Name of the private endpoint. + returned: always + type: str + sample: estprivateendpoint + subnets_id: + description: + - Subnets associated with the virtual network. + returned: always + type: str + sample: "/subscriptions/xxx-xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/fredtestRG-vnet/subnets/default" + private_link_service_connections: + description: + - The resource id of the private endpoint to connect. + returned: always + type: list + sample: ["/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/privateEndpoints/point/privateLinkServiceConnections/point",] + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/privateEndpoints +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt + + +network_interfaces_spec = dict( + id=dict(type='str') +) + + +private_service_connection_spec = dict( + name=dict(type='str'), + private_link_service_id=dict(type='str'), + group_ids=dict(type='list', elements='str') +) + + +subnet_spec = dict( + id=dict(type='str') +) + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMPrivateEndpoint(AzureRMModuleBaseExt): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + subnet=dict(type='dict', options=subnet_spec), + private_link_service_connections=dict(type='list', elements='dict', 
options=private_service_connection_spec), + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.body = {} + self.tags = None + + self.results = dict( + changed=False, + state=dict() + ) + self.to_do = Actions.NoAction + + super(AzureRMPrivateEndpoint, self).__init__(self.module_arg_spec, + supports_tags=True, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + self.body['location'] = self.location + self.body['tags'] = self.tags + + self.log('Fetching private endpoint {0}'.format(self.name)) + old_response = self.get_resource() + + if old_response is None: + if self.state == "present": + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + update_tags, newtags = self.update_tags(old_response.get('tags', {})) + if update_tags: + self.body['tags'] = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource_private_endpoint(self.body) + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.delete_private_endpoint() + else: + self.results['changed'] = False + response = old_response + if response is not None: + self.results['state'] = response + return self.results + + def create_update_resource_private_endpoint(self, privateendpoint): + try: + poller = 
self.network_client.private_endpoints.begin_create_or_update(resource_group_name=self.resource_group, + private_endpoint_name=self.name, parameters=privateendpoint) + new_privateendpoint = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating private endpoint {0} - {1}".format(self.name, str(exc))) + + return self.private_endpoints_to_dict(new_privateendpoint) + + def delete_private_endpoint(self): + try: + poller = self.network_client.private_endpoints.begin_delete(self.resource_group, self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting private endpoint {0} - {1}".format(self.name, str(exc))) + return result + + def get_resource(self): + found = False + try: + private_endpoint = self.network_client.private_endpoints.get(self.resource_group, self.name) + results = self.private_endpoints_to_dict(private_endpoint) + found = True + self.log("Response : {0}".format(results)) + except ResourceNotFoundError: + self.log("Did not find the private endpoint resource") + if found is True: + return results + else: + return None + + def private_endpoints_to_dict(self, privateendpoint): + results = dict( + id=privateendpoint.id, + name=privateendpoint.name, + location=privateendpoint.location, + tags=privateendpoint.tags, + provisioning_state=privateendpoint.provisioning_state, + type=privateendpoint.type, + etag=privateendpoint.etag, + subnet=dict(id=privateendpoint.subnet.id) + ) + if privateendpoint.network_interfaces and len(privateendpoint.network_interfaces) > 0: + results['network_interfaces'] = [] + for interface in privateendpoint.network_interfaces: + results['network_interfaces'].append(interface.id) + if privateendpoint.private_link_service_connections and len(privateendpoint.private_link_service_connections) > 0: + results['private_link_service_connections'] = [] + for connections in privateendpoint.private_link_service_connections: + 
results['private_link_service_connections'].append(dict(private_link_service_id=connections.private_link_service_id, name=connections.name)) + + return results + + +def main(): + AzureRMPrivateEndpoint() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpoint_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpoint_info.py new file mode 100644 index 000000000..dbf39e969 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpoint_info.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_privateendpoint_info + +version_added: "1.8.0" + +short_description: Get private endpoints info + +description: + - Get facts for private endpoints. + +options: + name: + description: + - Name of resource group. + type: str + resource_group: + description: + - Limit results by resource group. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
        type: list
        elements: str

extends_documentation_fragment:
    - azure.azcollection.azure

author:
    - Fred-sun (@Fred-sun)
'''

EXAMPLES = '''
    - name: Get facts for one private endpoint
      azure_rm_privateendpoint_info:
        resource_group: myResourceGroup
        name: testprivateendpoint

    - name: Get all private endpoints under the resource group
      azure_rm_privateendpoint_info:
        resource_group: myResourceGroup

    - name: Get all private endpoints under the subscription
      azure_rm_privateendpoint_info:
        tags:
          - key1:value1
'''

RETURN = '''
state:
    description:
        - List of private endpoint dict with same format as M(azure.azcollection.azure_rm_privateendpoint) module parameter.
    returned: always
    type: complex
    contains:
        id:
            description:
                - Resource ID of the private endpoint.
            sample: /subscriptions/xxx-xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/testprivateendpoint
            returned: always
            type: str
        etag:
            description:
                - A unique read-only string that changes whenever the resource is updated.
            sample: 'W/\"20803842-7d51-46b2-a790-ded8971b4d8a'
            returned: always
            type: str
        network_interfaces:
            description:
                - List ID of the network interfaces.
            returned: always
            type: list
            sample: ["/subscriptions/xxx-xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/fredprivateendpoint002.nic"]
        location:
            description:
                - Valid Azure location.
            returned: always
            type: str
            sample: eastus
        tags:
            description:
                - Tags assigned to the resource. Dictionary of string:string pairs.
            returned: always
            type: dict
            sample: { "tag1": "abc" }
        provisioning_state:
            description:
                - Provisioning state of the resource.
            returned: always
            sample: Succeeded
            type: str
        name:
            description:
                - Name of the private endpoint.
            returned: always
            type: str
            sample: testprivateendpoint
        subnets_id:
            description:
                - Subnets associated with the virtual network.
+ returned: always + type: str + sample: "/subscriptions/xxx-xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/fredtestRG-vnet/subnets/default" + private_link_service_connections: + description: + - The resource id of the private endpoint to connect. + returned: always + type: complex + contains: + id: + description: + - The resource id of the private endpoint to connect. + returned: always + type: str + name: + description: + - The name of the private endpoint connection. + returned: always + type: str + connection_state: + description: + - State details of endpoint connection + type: complex + returned: always + contains: + description: + description: + - The reason for approval/rejection of the connection. + returned: always + type: str + sample: "Auto Approved" + status: + description: + - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. + returned: always + type: str + sample: Approved + actions_required: + description: + - A message indicating if changes on the service provider require any updates on the consumer. + type: str + returned: always + sample: "This is action_required string" + group_ids: + description: + - List of group_ids associated with private endpoint + returned: always + type: list + sample: ["postgresqlServer"] + type: + description: + - Resource type. 
+ returned: always + type: str + sample: Microsoft.Network/privateEndpoints +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMPrivateEndpointInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + privateendpoints=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + self.results = dict( + changed=False + ) + + super(AzureRMPrivateEndpointInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['privateendpoints'] = self.get_item() + elif self.resource_group is not None: + self.results['privateendpoints'] = self.list_resource_group() + else: + self.results['privateendpoints'] = self.list_items() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + results = [] + + try: + item = self.network_client.private_endpoints.get(self.resource_group, self.name) + except ResourceNotFoundError: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + format_item = self.privateendpoints_to_dict(item) + + if format_item and self.has_tags(format_item['tags'], self.tags): + results = [format_item] + return results + + def list_resource_group(self): + self.log('List items for resource group') + try: + response = self.network_client.private_endpoints.list(self.resource_group) + except ResourceNotFoundError as exc: + self.fail("Failed to list for resource group {0} - 
{1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + format_item = self.privateendpoints_to_dict(item) + if self.has_tags(format_item['tags'], self.tags): + results.append(format_item) + return results + + def list_items(self): + self.log('List all for items') + try: + response = self.network_client.private_endpoints.list_by_subscription() + except ResourceNotFoundError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + format_item = self.privateendpoints_to_dict(item) + if self.has_tags(format_item['tags'], self.tags): + results.append(format_item) + return results + + def privateendpoints_to_dict(self, privateendpoint): + if privateendpoint is None: + return None + results = dict( + id=privateendpoint.id, + name=privateendpoint.name, + location=privateendpoint.location, + tags=privateendpoint.tags, + provisioning_state=privateendpoint.provisioning_state, + type=privateendpoint.type, + etag=privateendpoint.etag, + subnet_id=privateendpoint.subnet.id + ) + if privateendpoint.network_interfaces and len(privateendpoint.network_interfaces) > 0: + results['network_interfaces'] = [] + for interface in privateendpoint.network_interfaces: + results['network_interfaces'].append(interface.id) + if privateendpoint.private_link_service_connections and len(privateendpoint.private_link_service_connections) > 0: + results['private_link_service_connections'] = [] + for connections in privateendpoint.private_link_service_connections: + connection = {} + connection['connection_state'] = {} + connection['id'] = connections.id + connection['name'] = connections.name + connection['type'] = connections.type + connection['group_ids'] = connections.group_ids + connection['connection_state']['status'] = connections.private_link_service_connection_state.status + connection['connection_state']['description'] = connections.private_link_service_connection_state.description + 
connection['connection_state']['actions_required'] = connections.private_link_service_connection_state.actions_required + results['private_link_service_connections'].append(connection) + if privateendpoint.manual_private_link_service_connections and len(privateendpoint.manual_private_link_service_connections) > 0: + results['manual_private_link_service_connections'] = [] + for connections in privateendpoint.manual_private_link_service_connections: + results['manual_private_link_service_connections'].append(connections.id) + return results + + +def main(): + AzureRMPrivateEndpointInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection.py new file mode 100644 index 000000000..85c0234ff --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_privateendpointconnection + +version_added: "1.12.0" + +short_description: Managed private endpoint connection + +description: + - Update or delete the private endpoint connection. + +options: + name: + description: + - The name of the private end point connection. + type: str + required: True + service_name: + description: + - The name of the private link service. + type: str + required: true + resource_group: + description: + - The name of the resource group. + type: str + required: true + connection_state: + description: + - A collection of information about the state of the connection between service consumer and provider. 
        type: dict
        suboptions:
            status:
                description:
                    - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
                type: str
                choices:
                    - Approved
                    - Rejected
                    - Removed
            description:
                description:
                    - The reason for approval/rejection of the connection.
                type: str
            actions_required:
                description:
                    - A message indicating if changes on the service provider require any updates on the consumer.
                type: str
    state:
        description:
            - Assert the state of the connection. Use C(present) to update a connection and C(absent) to delete it.
        default: present
        type: str
        choices:
            - absent
            - present

extends_documentation_fragment:
    - azure.azcollection.azure

author:
    - xuzhang3 (@xuzhang3)
    - Fred-sun (@Fred-sun)
'''

EXAMPLES = '''
- name: Update private endpoint connection
  azure_rm_privateendpointconnection:
    name: pe-connection-name
    service_name: testserviceName
    resource_group: myRG
    connection_state:
      description: "new_description string"
      actions_required: "Message string"
      status: "Rejected"

- name: Delete private endpoint connection
  azure_rm_privateendpointconnection:
    name: pe-connection-name
    service_name: testserviceName
    resource_group: myRG
    state: absent
'''

RETURN = '''
state:
    description:
        - List of private endpoint connection info.
    returned: always
    type: complex
    contains:
        id:
            description:
                - Resource ID of the private endpoint connection.
            returned: always
            type: str
            sample: "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/privateLinkServices/linkservice/privateEndpointConnections/link.09"
        name:
            description:
                - Name of the private endpoint connection.
            returned: always
            type: str
            sample: testlink.09
        link_identifier:
            description:
                - The consumer link id.
            returned: always
            type: str
            sample: 536890208
        PrivateEndpoint:
            description:
                - The resource of private end point.
+ type: complex + returned: always + contains: + id: + description: + - The private endpoint resource ID. + type: str + returned: always + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/privateEndpoints/testlink02" + private_link_service_connection_state: + description: + - A collection of information about the state of the connection between service consumer and provider. + type: complex + returned: always + contains: + description: + description: + - The reason for approval/rejection of the connection. + returned: always + type: str + sample: "Auto Approved" + status: + description: + - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. + returned: always + type: str + sample: Approved + actions_required: + description: + - A message indicating if changes on the service provider require any updates on the consumer. + type: str + returned: always + sample: "This is action_required string" + provisioning_state: + description: + - Provisioning state of the resource. + returned: always + type: str + sample: Succeeded + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + type: str + returned: always + sample: "f7d60f37-ea2b-4091-8546-1327f35468c4" + type: + description: + - The resource type. 
+ type: str + returned: always + sample: Microsoft.Network/privateLinkServices/privateEndpointConnections +''' + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +connection_state_spec = dict( + status=dict(type='str', choices=['Approved', 'Rejected', 'Removed']), + description=dict(type='str'), + actions_required=dict(type='str') +) + + +class AzureRMPrivateEndpointConnection(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type="str", required=True), + service_name=dict(type="str", required=True), + resource_group=dict(type="str", required=True), + connection_state=dict(type='dict', options=connection_state_spec), + state=dict(type='str', choices=['present', 'absent'], default='present'), + ) + + self.name = None + self.service_name = None + self.resource_group = None + self.connection_state = None + self.results = dict( + changed=False, + ) + + super(AzureRMPrivateEndpointConnection, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=False) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + old_response = self.get_resource() + response = None + changed = False + + if self.state == 'present': + if old_response: + if self.connection_state is not None: + + if self.connection_state.get('status') is not None: + if self.connection_state.get('status') != old_response['private_link_service_connection_state']['status']: + changed = True + else: + self.connection_state['status'] = old_response['private_link_service_connection_state']['status'] + + if self.connection_state.get('description'): + if self.connection_state.get('description') != 
old_response['private_link_service_connection_state']['description']: + changed = True + else: + self.connection_state['description'] = old_response['private_link_service_connection_state']['description'] + if self.connection_state.get('actions_required'): + if self.connection_state.get('actions_required') != old_response['private_link_service_connection_state']['actions_required']: + changed = True + else: + self.connection_state['actions_required'] = old_response['private_link_service_connection_state']['actions_required'] + + if changed: + if self.check_mode: + self.log("The private endpoint connection is exist, will be updated") + else: + parameters = {'private_link_service_connection_state': self.connection_state} + response = self.update_resource(parameters) + if response: + response = self.connect_to_dict(response) + else: + if self.check_mode: + self.log("Check mode test. The private endpoint connection is exist, No operation in this task") + else: + response = old_response + self.log("The private endpoint connection is exist, No operation in this task") + else: + if self.check_mode: + changed = True + self.log("The private endpoint conneection is not exist, will be created, but this module not support create funciont") + else: + self.fail("The private endpoint connection {0} isn't exist, This Module not support create".format(self.name)) + else: + if old_response: + changed = True + if self.check_mode: + self.log("The private endpoint conneection is exist, will be deleted") + else: + self.delete_resource() + else: + if self.check_mode: + self.log("The private endpoint connection isn't exist, no action") + else: + self.log("The private endpoint connection isn't exist, don't need to delete") + + self.results['changed'] = changed + self.results['state'] = response + return self.results + + def get_resource(self): + self.log("Get properties for {0} in {1}".format(self.name, self.service_name)) + try: + response = 
self.network_client.private_link_services.get_private_endpoint_connection(self.resource_group, self.service_name, self.name) + return self.connect_to_dict(response) + except ResourceNotFoundError: + self.log("Could not get info for {0} in {1}".format(self.name, self.service_name)) + + return [] + + def update_resource(self, parameters): + self.log("Update the private endpoint connection for {0} in {1}".format(self.name, self.service_name)) + try: + response = self.network_client.private_link_services.update_private_endpoint_connection(self.resource_group, + self.service_name, + self.name, parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + return response + except Exception: + self.log("Update {0} in {1} failed".format(self.name, self.service_name)) + + return [] + + def delete_resource(self): + self.log("delete the private endpoint connection for {0} in {1}".format(self.name, self.service_name)) + try: + response = self.network_client.private_link_services.begin_delete_private_endpoint_connection(self.resource_group, self.service_name, self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + return response + except Exception: + self.log("Delete {0} in {1} failed".format(self.name, self.service_name)) + + return [] + + def connect_to_dict(self, connect_info): + connect = connect_info.as_dict() + result = dict( + id=connect.get("id"), + name=connect.get('name'), + type=connect.get('type'), + etag=connect.get('etag'), + private_endpoint=dict(), + private_link_service_connection_state=dict(), + provisioning_state=connect.get('provisioning_state'), + link_identifier=connect.get('link_identifier') + ) + if connect.get('private_endpoint') is not None: + result['private_endpoint']['id'] = connect.get('private_endpoint')['id'] + + if connect.get('private_link_service_connection_state') is not None: + result['private_link_service_connection_state']['status'] = 
connect.get('private_link_service_connection_state')['status'] + result['private_link_service_connection_state']['description'] = connect.get('private_link_service_connection_state')['description'] + result['private_link_service_connection_state']['actions_required'] = connect.get('private_link_service_connection_state')['actions_required'] + return result + + +def main(): + AzureRMPrivateEndpointConnection() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection_info.py new file mode 100644 index 000000000..d654bac49 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointconnection_info.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_privateendpointconnection_info + +version_added: "1.12.0" + +short_description: Get private endpoint connection info + +description: + - Get facts for private endpoint connection info. + +options: + name: + description: + - The name of the private end point connection. + type: str + service_name: + description: + - The name of the private link service. + type: str + required: true + resource_group: + description: + - The name of the resource group. 
+ type: str + required: true + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' +- name: Get private endpoint connection info by name + azure_rm_privateendpointconnection_info: + name: pe-connection-name + service_name: testserviceName + resource_group: myRG + +- name: Get all private endpoint connection info by service name + azure_rm_privateendpointconnection_info: + service_name: testserviceName + resource_group: myRG +''' + +RETURN = ''' +endpoint_connection: + description: + - List of private endpoint connection info. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the private endpoint connection. + sample: "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/privateLinkServices/linkservice/privateEndpointConnections/link.09" + returned: always + type: str + name: + description: + - Name of the private endpoint connection. + returned: always + type: str + sample: testlink.09 + link_identifier: + description: + - The consumer link id. + returned: always + type: str + sample: 536890208 + PrivateEndpoint: + description: + - The resource of private end point. + type: complex + returned: always + contains: + id: + description: + - The private endpoint resource ID. + type: str + returned: always + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/privateEndpoints/testlink02" + private_link_service_connection_state: + description: + - A collection of information about the state of the connection between service consumer and provider. + type: complex + returned: always + contains: + description: + description: + - The reason for approval/rejection of the connection. + returned: always + type: str + sample: "Auto Approved" + status: + description: + - Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. 
class AzureRMPrivateEndpointConnectionInfo(AzureRMModuleBase):
    """Info module: fetch private endpoint connections of a private link service.

    Returns one connection when I(name) is supplied, otherwise all connections
    of the service, each flattened to the structure documented in RETURN.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type="str"),
            service_name=dict(type="str", required=True),
            resource_group=dict(type="str", required=True),
        )

        self.name = None
        self.service_name = None
        self.resource_group = None
        self.results = dict(
            changed=False,
        )

        super(AzureRMPrivateEndpointConnectionInfo, self).__init__(self.module_arg_spec,
                                                                   supports_check_mode=True,
                                                                   supports_tags=False,
                                                                   facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to a single get or a full listing depending on I(name)."""
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])

        if self.name is not None:
            self.results["endpoint_connection"] = self.get_item()
        else:
            self.results["endpoint_connection"] = self.list_items()

        return self.results

    def get_item(self):
        """Return a single-element list with the named connection, or [] when it does not exist."""
        self.log("Get properties for {0} in {1}".format(self.name, self.service_name))

        try:
            response = self.network_client.private_link_services.get_private_endpoint_connection(self.resource_group, self.service_name, self.name)
            return [self.connect_to_dict(response)]
        except ResourceNotFoundError:
            # A missing connection is not an error for an info module; report an empty list.
            self.log("Could not get info for {0} in {1}".format(self.name, self.service_name))

        return []

    def list_items(self):
        """Return all private endpoint connections of the service as a list of dicts.

        Any service error is reported through fail_json.
        """
        self.log("List all in {0}".format(self.service_name))
        try:
            # The list operation returns an azure.core ItemPaged, which is a plain
            # iterable that transparently follows continuation tokens — no manual
            # next()/StopIteration loop is needed.
            response = self.network_client.private_link_services.list_private_endpoint_connections(self.resource_group, self.service_name)
            return [self.connect_to_dict(item) for item in response]
        except Exception as exc:
            self.fail("Failed to list all items in {0}: {1}".format(self.service_name, str(exc)))

    def connect_to_dict(self, connect_info):
        """Flatten an SDK PrivateEndpointConnection object into the documented return structure."""
        connect = connect_info.as_dict()
        result = dict(
            id=connect.get("id"),
            name=connect.get('name'),
            type=connect.get('type'),
            etag=connect.get('etag'),
            private_endpoint=dict(),
            private_link_service_connection_state=dict(),
            provisioning_state=connect.get('provisioning_state'),
            link_identifier=connect.get('link_identifier')
        )
        if connect.get('private_endpoint') is not None:
            result['private_endpoint']['id'] = connect.get('private_endpoint')['id']

        # Hoist the nested dict lookup instead of re-fetching it per field.
        connection_state = connect.get('private_link_service_connection_state')
        if connection_state is not None:
            result['private_link_service_connection_state']['status'] = connection_state['status']
            result['private_link_service_connection_state']['description'] = connection_state['description']
            result['private_link_service_connection_state']['actions_required'] = connection_state['actions_required']
        return result
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointdnszonegroup.py new file mode 100644 index 000000000..1d5480de3 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privateendpointdnszonegroup.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_privateendpointdnszonegroup + +version_added: "1.10.0" + +short_description: Create, update, or manage private endpoint DNS zone groups. + +description: + - Create, update, or manage private endpoint DNS zone groups. + +options: + name: + description: + - The name of the private endpoint DNS zone group. + type: str + required: true + private_endpoint: + description: + - Name of private endpoint. + type: str + required: true + resource_group: + description: + - Resource group of the private endpoint. + type: str + required: true + private_dns_zone_configs: + description: + - The Private DNS zones configurations. + type: list + elements: dict + suboptions: + name: + description: + - The name of the private dns zone configs. + type: str + private_dns_zone: + description: + - The name of the Private DNS zone. + - If set, the Private DNS Zone under the current resource group is obtained. + type: str + private_dns_zone_id: + description: + - The ID of the private dns zone. + - If set, gets the value of the specified connection Private DNS Zone. + type: str + state: + description: + - State of the private endpoint DNS zone group. Use C(present) to create or update and C(absent) to delete. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' +- name: Create zone group for private endpoint + azure_rm_privateendpointdnszonegroup: + name: "my-zone-group" + private_endpoint: "my-private-endpoint" + resource_group: "my-resource-group" + private_dns_zone_configs: + - name: "default" + private_dns_zone: "privatelink.postgres.database.azure.com" + +- name: Create zone group for private endpoint + azure_rm_privateendpointdnszonegroup: + name: "my-zone-group" + private_endpoint: "my-private-endpoint" + resource_group: "my-resource-group" + state: "absent" +''' + + +RETURN = ''' +state: + description: + - Current state of the private endpoint zone group. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the private endpoint zone group. + sample: >- + /subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/ + myPrivateEndpoint/privateDnsZoneGroups/myZoneGroup + returned: always + type: str + name: + description: + - Name of the private endpoint zone group. + returned: always + type: str + sample: myZoneGroup + private_dns_zone_configs: + description: + - List of zone configuration within the zone group. + returned: always + type: list + contains: + name: + description: + - Name of the zone config. + returned: always + type: str + sample: default + private_dns_zone_id: + description: + - ID of the private DNS zone. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/ + privateDnsZones/privatelink.postgres.database.azure.com + record_sets: + description: + - List of DNS records for zone. + returned: always + type: list + contains: + fqdn: + description: + - Fully qualified domain name of the record. 
private_dns_zone_configs_spec = dict(
    name=dict(type="str"),
    private_dns_zone=dict(type="str"),
    private_dns_zone_id=dict(type="str")
)


class Actions:
    # Enumeration of the operations exec_module can decide to perform.
    NoAction, Create, Update, Delete = range(4)


class AzureRMPrivateEndpointDnsZoneGroup(AzureRMModuleBaseExt):
    """Create, update, or delete a DNS zone group attached to a private endpoint."""

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type="str", required=True),
            private_endpoint=dict(type="str", required=True),
            resource_group=dict(type="str", required=True),
            private_dns_zone_configs=dict(type="list", elements="dict", options=private_dns_zone_configs_spec),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )

        self.name = None
        self.private_endpoint = None
        self.resource_group = None
        self.state = None
        # Request body for begin_create_or_update; filled from module args in exec_module.
        self.parameters = dict()
        self.results = dict(
            changed=False,
            state=dict()
        )
        self.to_do = Actions.NoAction

        super(AzureRMPrivateEndpointDnsZoneGroup, self).__init__(self.module_arg_spec,
                                                                 supports_tags=False,
                                                                 supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Entry point: decide on create/update/delete/no-op and carry it out."""
        for key in self.module_arg_spec:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.parameters[key] = kwargs[key]

        # Resolve private_dns_zone names into full resource IDs, which is what the API expects.
        for zone_config in self.parameters.get("private_dns_zone_configs", []):
            if zone_config.get("private_dns_zone_id") is not None:
                self.log("The private_dns_zone_id exist, do nothing")
            else:
                # pop() with a default so a config carrying neither key fails with a
                # clear message instead of raising KeyError or building an ID with
                # name=None.
                zone_name = zone_config.pop("private_dns_zone", None)
                if zone_name is None:
                    self.fail("Each private DNS zone config must set either private_dns_zone or private_dns_zone_id.")
                zone_config["private_dns_zone_id"] = self.private_dns_zone_id(zone_name)

        self.log("Fetching private endpoint {0}".format(self.name))
        old_response = self.get_zone()

        if not old_response:
            if self.state == "present":
                self.to_do = Actions.Create
                # Creating a zone group only makes sense on an existing private endpoint.
                self.ensure_private_endpoint()
        else:
            if self.state == "absent":
                self.to_do = Actions.Delete
            else:
                self.results["compare"] = []
                if not self.idempotency_check(old_response, self.parameters):
                    self.to_do = Actions.Update

        if self.to_do in (Actions.Create, Actions.Update):
            self.results["changed"] = True
            if self.check_mode:
                return self.results
            response = self.create_update_zone()
        elif self.to_do == Actions.Delete:
            self.results["changed"] = True
            if self.check_mode:
                return self.results
            response = self.delete_zone()
        else:
            self.results["changed"] = False
            response = old_response

        if response is not None:
            self.results["state"] = response

        return self.results

    def get_zone(self):
        """Return the existing zone group as a dict, or None when it does not exist."""
        try:
            item = self.network_client.private_dns_zone_groups.get(resource_group_name=self.resource_group,
                                                                   private_endpoint_name=self.private_endpoint,
                                                                   private_dns_zone_group_name=self.name)
            return self.zone_to_dict(item)
        except ResourceNotFoundError:
            self.log("Did not find the private endpoint resource")
            return None

    def create_update_zone(self):
        """Create or update the zone group and return the resulting state as a dict."""
        try:
            self.parameters["name"] = self.name
            response = self.network_client.private_dns_zone_groups.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                          private_endpoint_name=self.private_endpoint,
                                                                                          private_dns_zone_group_name=self.name,
                                                                                          parameters=self.parameters)
            # Wait for the long-running operation so the returned state is final.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

            return self.zone_to_dict(response)
        except Exception as exc:
            self.fail("Error creating or updating DNS zone group {0} for private endpoint {1}: {2}".format(self.name, self.private_endpoint, str(exc)))

    def ensure_private_endpoint(self):
        """Fail the module when the parent private endpoint does not exist."""
        try:
            self.network_client.private_endpoints.get(resource_group_name=self.resource_group,
                                                      private_endpoint_name=self.private_endpoint)
        except ResourceNotFoundError:
            self.fail("Could not load the private endpoint {0}.".format(self.private_endpoint))

    def delete_zone(self):
        """Delete the zone group, waiting for the long-running operation to finish."""
        try:
            response = self.network_client.private_dns_zone_groups.begin_delete(resource_group_name=self.resource_group,
                                                                                private_endpoint_name=self.private_endpoint,
                                                                                private_dns_zone_group_name=self.name)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

            return response
        except Exception as exc:
            self.fail("Error deleting private endpoint {0}: {1}".format(self.name, str(exc)))

    def zone_to_dict(self, zone):
        """Flatten an SDK PrivateDnsZoneGroup object into the documented return structure."""
        # Guard clause instead of the original inverted if/else.
        if zone is None:
            return None
        zone_dict = zone.as_dict()
        return dict(
            id=zone_dict.get("id"),
            name=zone_dict.get("name"),
            private_dns_zone_configs=[self.zone_config_to_dict(zone_config) for zone_config in zone_dict.get("private_dns_zone_configs", [])],
            provisioning_state=zone_dict.get("provisioning_state"),
        )

    def zone_config_to_dict(self, zone_config):
        """Flatten one zone configuration entry."""
        return dict(
            name=zone_config.get("name"),
            private_dns_zone_id=zone_config.get("private_dns_zone_id"),
            record_sets=[self.record_set_to_dict(record_set) for record_set in zone_config.get("record_sets", [])],
        )

    def record_set_to_dict(self, record_set):
        """Flatten one DNS record set entry."""
        return dict(
            fqdn=record_set.get("fqdn"),
            ip_addresses=record_set.get("ip_addresses"),
            provisioning_state=record_set.get("provisioning_state"),
            record_set_name=record_set.get("record_set_name"),
            record_type=record_set.get("record_type"),
            ttl=record_set.get("ttl"),
        )

    def private_dns_zone_id(self, name):
        """Build the full resource ID of a private DNS zone in the module's resource group."""
        return resource_id(subscription=self.subscription_id,
                           resource_group=self.resource_group,
                           namespace='Microsoft.Network',
                           type='privateDnsZones',
                           name=name)
+ type: str + required: true + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' +- name: Get specific DNS zone groups for a private endpoint + azure_rm_privateendpointdnszonegroup_info: + name: "my-zone-group" + private_endpoint: "my-private-endpoint" + resource_group: "my-resource-group" + +- name: Get all DNS zone groups for a private endpoint + azure_rm_privateendpointdnszonegroup_info: + private_endpoint: "my-private-endpoint" + resource_group: "my-resource-group" +''' + +RETURN = ''' +groups: + description: + - List of private endpoint zone groups. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the private endpoint zone group. + sample: >- + /subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/myPrivateEndpoint/ + privateDnsZoneGroups/myZoneGroup + returned: always + type: str + name: + description: + - Name of the private endpoint zone group. + returned: always + type: str + sample: myZoneGroup + private_dns_zone_configs: + description: + - List of zone configuration within the zone group. + returned: always + type: complex + contains: + name: + description: + - Name of the zone config. + returned: always + type: str + sample: default + private_dns_zone_id: + description: + - ID of the private DNS zone. + returned: always + type: str + sample: >- + /subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/ + privateDnsZones/privatelink.postgres.database.azure.com + record_sets: + description: + - List of DNS records for zone. + returned: always + type: complex + contains: + fqdn: + description: + - Fully qualified domain name of the record. + returned: always + type: str + sample: myPostgreSqlSrv-123.privatelink.postgres.database.azure.com + ip_addresses: + description: + - IP addresses for the record. 
class AzureRMPrivateEndpointDnsZoneGroupInfo(AzureRMModuleBase):
    """Info module: fetch DNS zone groups of a private endpoint.

    Returns one zone group when I(name) is supplied, otherwise all zone groups
    of the private endpoint, each flattened to the structure documented in RETURN.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type="str"),
            private_endpoint=dict(type="str", required=True),
            resource_group=dict(type="str", required=True),
        )

        self.name = None
        self.private_endpoint = None
        self.resource_group = None
        # Single results template. The original assigned self.results twice,
        # with the second assignment silently discarding the "groups" key.
        self.results = dict(
            changed=False,
            groups=[],
        )

        super(AzureRMPrivateEndpointDnsZoneGroupInfo, self).__init__(self.module_arg_spec,
                                                                     supports_check_mode=True,
                                                                     supports_tags=False,
                                                                     facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: one zone group when I(name) is given, otherwise all of them."""
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])

        if self.name is not None:
            self.results["groups"] = self.get_item()
        else:
            self.results["groups"] = self.list_items()

        return self.results

    def get_item(self):
        """Return a single-element list with the named zone group, or [] when absent."""
        self.log("Get properties for {0} in {1}".format(self.name, self.private_endpoint))

        try:
            item = self.network_client.private_dns_zone_groups.get(resource_group_name=self.resource_group,
                                                                   private_endpoint_name=self.private_endpoint,
                                                                   private_dns_zone_group_name=self.name)
            return [self.zone_to_dict(item)]
        except ResourceNotFoundError:
            # A missing zone group is not an error for an info module; report an empty list.
            self.log("Could not get info for {0} in {1}".format(self.name, self.private_endpoint))

        return []

    def list_items(self):
        """Return all zone groups of the private endpoint as a list of dicts."""
        self.log("List all in {0}".format(self.private_endpoint))
        try:
            items = self.network_client.private_dns_zone_groups.list(private_endpoint_name=self.private_endpoint, resource_group_name=self.resource_group)
            return [self.zone_to_dict(item) for item in items]
        except Exception as exc:
            # Catch broadly so any service error is reported through fail_json,
            # matching the sibling info modules; the original only caught
            # ResourceNotFoundError, letting other HTTP errors escape as raw
            # tracebacks.
            self.fail("Failed to list all items in {0}: {1}".format(self.private_endpoint, str(exc)))

    def zone_to_dict(self, zone):
        """Flatten an SDK PrivateDnsZoneGroup object into the documented return structure."""
        zone_dict = zone.as_dict()
        return dict(
            id=zone_dict.get("id"),
            name=zone_dict.get("name"),
            private_dns_zone_configs=[self.zone_config_to_dict(zone_config) for zone_config in zone_dict.get("private_dns_zone_configs", [])],
            provisioning_state=zone_dict.get("provisioning_state"),
        )

    def zone_config_to_dict(self, zone_config):
        """Flatten one zone configuration entry."""
        return dict(
            id=zone_config.get("id"),
            name=zone_config.get("name"),
            private_dns_zone_id=zone_config.get("private_dns_zone_id"),
            record_sets=[self.record_set_to_dict(record_set) for record_set in zone_config.get("record_sets", [])],
        )

    def record_set_to_dict(self, record_set):
        """Flatten one DNS record set entry."""
        return dict(
            fqdn=record_set.get("fqdn"),
            ip_addresses=record_set.get("ip_addresses"),
            provisioning_state=record_set.get("provisioning_state"),
            record_set_name=record_set.get("record_set_name"),
            record_type=record_set.get("record_type"),
            ttl=record_set.get("ttl"),
        )
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatelinkservice.py @@ -0,0 +1,596 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_privatelinkservice + +version_added: "1.12.0" + +short_description: Managed private link service resource + +description: + - Create, Update or Delete private link service resource. + +options: + name: + description: + - The name of the private link service. + type: str + required: True + resource_group: + description: + - The name of the resource group. + type: str + required: True + location: + description: + - The resource location. + type: str + load_balancer_frontend_ip_configurations: + description: + - An array of references to the load balancer IP configurations + - Cannot have more than one load balancer frontend IP configuration on the private link service. + type: list + elements: dict + suboptions: + id: + description: + - The load balancer frontend IP's ID. + type: str + fqdns: + description: + - The list of Fqdn. + elements: str + type: list + auto_approval: + description: + - The auto-approval list of the private link service. + type: dict + suboptions: + subscriptions: + description: + - The list of subscriptions. + type: list + elements: str + visibility: + description: + - The visibility list of the private link service. + type: dict + suboptions: + subscriptions: + description: + - The list of subscriptions. + type: list + elements: str + enable_proxy_protocol: + description: + - Whether the private link service is enabled for proxy protocol or not. + type: bool + ip_configurations: + description: + - An array of private link service IP configurations. 
+ type: list + elements: dict + suboptions: + name: + description: + - The name of private link service ip configuration. + type: str + properties: + description: + - The property of the private link service IP configurations. + type: dict + suboptions: + primary: + description: + - Whether the ip configuration is primary or not. + type: bool + private_ip_allocation_method: + description: + - The private IP address allocation method. + type: str + choices: + - Static + - Dynamic + private_ip_address_version: + description: + - Whether the specific IP configuration is IPv4 or IPv6. + type: str + choices: + - IPv4 + - IPv6 + subnet: + description: + - The reference to the subnet resource. + type: dict + suboptions: + id: + description: + - The ID of the subnet. + type: str + state: + description: + - Assert the state of the pirvate link service. + - Use I(state=present) to create or update the link service and I(state=absent) to delete it. + type: str + default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' +- name: Create the private link service + azure_rm_privatelinkservice: + name: linkservice-name + resource_group: myResourceGroup + enable_proxy_protocol: True + fqdns: + - 'dns01.com' + - 'dns02.com' + visibility: + subscriptions: + - xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + - yyyyyy-yyyyy-yyyy-yyyy-yyyyyyyyyyy + auto_approval: + subscriptions: + - xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + - yyyyyy-yyyyy-yyyy-yyyy-yyyyyyyyyyy + load_balancer_frontend_ip_configurations: + - id: load_balancer_frontend_ip_configurations_id + ip_configurations: + - name: testSubnet + properties: + primary: False + private_ip_allocation_method: 'Dynamic' + private_ip_address_version: 'IPv4' + subnet: + id: subnet_id + - name: subnetfredprivate-1 + properties: + primary: True + private_ip_allocation_method: 'Static' + 
private_ip_address_version: 'IPv4' + subnet: + id: subnet_id + tags: + key1: value1 + key2: value2 + +- name: delete the private link service + azure_rm_privatelinkservice: + name: linkservice-name + resource_group: myResourceGroup + state: absent +''' + +RETURN = ''' +link_service: + description: + - List of private link service info. + returned: always + type: complex + contains: + id: + description: + - Resource ID of the private link service. + sample: "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateLinkServices/linkservice" + returned: always + type: str + name: + description: + - Name of the private link service. + returned: always + type: str + sample: linkservice + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + alias: + description: + - The alias of the private link service. + type: str + returned: always + sample: "linkservice.6a244dd8-8416-40cf-8c04-52b353bdd507.eastus.azure.privatelinkservice" + auto_approval: + description: + - The auto-approval list of the private link service. + type: dict + returned: always + sample: { "subscriptions": ['xxxx-xxxx', 'yyyy-yyyyy'] } + enable_proxy_protocol: + description: + - Whether the private link service is enabled for proxy protocol or not. + type: bool + returned: always + sample: False + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + type: str + returned: always + sample: "f7d60f37-ea2b-4091-8546-1327f35468c4" + type: + description: + - The resource type. + type: str + returned: always + sample: Microsoft.Network/privateLinkServices + visibility: + description: + - The visibility list of the private link service. + type: dict + returned: always + sample: { "subscriptions": ['xxxx-xxxx', 'yyyy-yyyyy'] } + tags: + description: + - The resource tags. 
+ type: dict + returned: always + sample: { 'key1': 'value1' } + provisioning_state: + description: + - The provisioning state of the private link service resource. + type: str + returned: always + sample: Succeeded + network_interfaces: + description: + - An array of references to the network interfaces created for this private link service. + type: list + returned: always + sample: [{ "id": "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testlinkservice.nic.f5"}] + ip_configurations: + description: + - An array of private link service IP configurations. + type: complex + returned: always + contains: + name: + description: + - The name of the IP configurations + type: str + returned: always + sample: subnetfredprivate-1 + properties: + description: + - The IP configuration properties. + type: complex + returned: always + contains: + primary: + description: + - Whether the ip configuration is primary or not. + returned: always + type: bool + sample: True + private_ip_address_version: + description: + - Whether the specific IP configuration is IPv4 or IPv6. + returned: always + type: str + sample: IPv4 + private_ip_allocation_method: + description: + - The private IP address allocation method. + returned: always + type: str + sample: Dynamic + subnet: + description: + - The reference to the subnet resource. + returned: always + type: dict + sample: { "id": "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnet/subnets/subnamee" } + load_balancer_frontend_ip_configurations: + description: + - An array of references to the load balancer IP configurations. + type: list + returned: awalys + sample: [{ "id": "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/loadBalancers/testlb/frontendIPConfigurations/front01" }] + fqdns: + description: + - The list of Fqdn. 
+ type: list + returned: always + sample: ['fqdns1.com', 'fqdns2.com'] + private_endpoint_connections: + description: + - An array of list about connections to the private endpoint. + type: complex + returned: always + contains: + id: + description: + - The ID of the private endpoint connection. + type: str + returned: always + sample: "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/privateLinkServices/linkservice/privateEndpointConnections/tes" + private_endpoint: + description: + - The ID of the private endpoint. + type: str + returned: always + sample: "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/test002" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except Exception: + # This is handled in azure_rm_common + pass + + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +load_balancer_frontend_ip_configurations_spec = dict( + id=dict(type='str'), +) + +auto_approval_spec = dict( + subscriptions=dict(type='list', elements='str') +) + +visibility_spec = dict( + subscriptions=dict(type='list', elements='str') +) + +properties_spec = dict( + primary=dict(type='bool'), + # private_ip_address=dict(type='str'), + private_ip_allocation_method=dict(type='str', choices=['Static', 'Dynamic']), + subnet=dict(type='dict', options=dict(id=dict(type='str'))), + private_ip_address_version=dict(type='str', choices=['IPv4', 'IPv6']) +) + + +class AzureRMPrivateLinkService(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type="str", required=True), + resource_group=dict(type="str", required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + load_balancer_frontend_ip_configurations=dict( + type='list', + elements='dict', + options=load_balancer_frontend_ip_configurations_spec + ), + 
ip_configurations=dict( + type='list', + elements='dict', + options=dict( + name=dict(type='str'), + properties=dict(type='dict', options=properties_spec) + ) + ), + visibility=dict(type='dict', options=visibility_spec), + fqdns=dict(type='list', elements='str'), + enable_proxy_protocol=dict(type='bool'), + auto_approval=dict(type='dict', options=auto_approval_spec), + ) + + self.name = None + self.resource_group = None + self.location = None + self.tags = None + self.state = None + self.results = dict( + changed=False, + ) + self.body = {} + + super(AzureRMPrivateLinkService, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=True, + facts_module=False) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + old_response = self.get_item() + result = None + changed = False + + if not self.location: + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + + self.body['location'] = self.location + self.body['tags'] = self.tags + + if self.state == 'present': + if old_response: + update_tags, tags = self.update_tags(old_response['tags']) + if update_tags: + changed = True + self.body['tags'] = tags + if self.body.get('enable_proxy_protocol') is not None: + if self.body.get('enable_proxy_protocol') != old_response['enable_proxy_protocol']: + changed = True + else: + self.body['enable_proxy_protocol'] = old_response['enable_proxy_protocol'] + + if self.body.get("auto_approval") is not None: + for value in old_response["auto_approval"]['subscriptions']: + if value not in self.body["auto_approval"]['subscriptions']: + self.body["auto_approval"]['subscriptions'].append(value) + if len(self.body["auto_approval"]['subscriptions']) != len(old_response["auto_approval"]['subscriptions']): + changed = True + else: + 
self.body["auto_approval"] = old_response["auto_approval"] + + if self.body.get("visibility") is not None: + for value in old_response["visibility"]['subscriptions']: + if value not in self.body["visibility"]['subscriptions']: + self.body["visibility"]['subscriptions'].append(value) + if len(self.body["visibility"]['subscriptions']) != len(old_response["visibility"]['subscriptions']): + changed = True + else: + self.body["visibility"] = old_response["visibility"] + + if self.body.get('fqdns') is not None: + for value in old_response['fqdns']: + if value not in self.body['fqdns']: + self.body['fqdns'].append(value) + if len(self.body.get('fqdns')) != len(old_response['fqdns']): + changed = True + else: + self.body['fqdns'] = old_response['fqdns'] + + if self.body.get('load_balancer_frontend_ip_configurations') is not None: + if self.body['load_balancer_frontend_ip_configurations'] != old_response['load_balancer_frontend_ip_configurations']: + self.fail("Private Link Service Load Balancer Reference Cannot Be Changed") + else: + self.body['load_balancer_frontend_ip_configurations'] = old_response['load_balancer_frontend_ip_configurations'] + + if self.body.get('ip_configurations') is not None: + for items in old_response['ip_configurations']: + if items['name'] not in [item['name'] for item in self.body['ip_configurations']]: + self.body['ip_configurations'].append(items) + if len(self.body['ip_configurations']) != len(old_response['ip_configurations']): + changed = True + else: + self.body['ip_configurations'] = old_response['ip_configurations'] + else: + changed = True + + if changed: + if self.check_mode: + self.log("Check mode test. The private link service is exist, will be create or updated") + else: + result = self.create_or_update(self.body) + else: + if self.check_mode: + self.log("Check mode test. 
The private endpoint connection is exist, No operation in this task") + else: + self.log("The private endpoint connection is exist, No operation in this task") + result = old_response + else: + if old_response: + changed = True + if self.check_mode: + self.log("Check mode test. The private link service is exist, will be deleted") + else: + result = self.delete_resource() + else: + if self.check_mode: + self.log("The private link service isn't exist, no action") + else: + self.log("The private link service isn't exist, don't need to delete") + + self.results["link_service"] = result + self.results['changed'] = changed + return self.results + + def get_item(self): + self.log("Get properties for {0} in {1}".format(self.name, self.resource_group)) + try: + response = self.network_client.private_link_services.get(self.resource_group, self.name) + return self.service_to_dict(response) + except ResourceNotFoundError: + self.log("Could not get info for {0} in {1}".format(self.name, self.resource_group)) + + return [] + + def create_or_update(self, parameters): + self.log("Create or update the private link service for {0} in {1}".format(self.name, self.resource_group)) + try: + response = self.network_client.private_link_services.begin_create_or_update(self.resource_group, self.name, parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + result = self.network_client.private_link_services.get(self.resource_group, self.name) + return self.service_to_dict(result) + except Exception as ec: + self.fail("Create or Update {0} in {1} failed, mesage {2}".format(self.name, self.resource_group, ec)) + + return [] + + def delete_resource(self): + self.log("delete the private link service for {0} in {1}".format(self.name, self.resource_group)) + try: + response = self.network_client.private_link_services.begin_delete(self.resource_group, self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + return 
response + except Exception as ec: + self.fail("Delete {0} in {1} failed, message {2}".format(self.name, self.resource_group, ec)) + + return [] + + def service_to_dict(self, service_info): + + service = service_info.as_dict() + result = dict( + id=service.get("id"), + name=service.get('name'), + type=service.get('type'), + etag=service.get('etag'), + location=service.get('location'), + tags=service.get('tags'), + load_balancer_frontend_ip_configurations=service.get('load_balancer_frontend_ip_configurations'), + ip_configurations=list(), + network_interfaces=service.get('network_interfaces'), + provisioning_state=service.get('provisioning_state'), + private_endpoint_connections=list(), + visibility=service.get('visibility'), + fqdns=service.get('fqdns'), + auto_approval=service.get('auto_approval'), + alias=service.get('alias'), + enable_proxy_protocol=service.get('enable_proxy_protocol') + ) + if service.get('private_endpoint_connections'): + for items in service['private_endpoint_connections']: + result['private_endpoint_connections'].append({'id': items['id'], 'private_endpoint': items['private_endpoint']['id']}) + + if service.get('ip_configurations'): + for items in service['ip_configurations']: + result['ip_configurations'].append( + { + "name": items['name'], + 'properties': { + "primary": items['primary'], + "private_ip_address_version": items["private_ip_address_version"], + "private_ip_allocation_method": items["private_ip_allocation_method"], + "subnet": items["subnet"], + } + } + ) + + return result + + +def main(): + AzureRMPrivateLinkService() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatelinkservice_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatelinkservice_info.py new file mode 100644 index 000000000..8d772ba91 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_privatelinkservice_info.py @@ -0,0 +1,345 @@ 
+#!/usr/bin/python
+#
+# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_privatelinkservice_info
+
+version_added: "1.12.0"
+
+short_description: Get private link service info
+
+description:
+    - Get facts for private link service info.
+
+options:
+    name:
+        description:
+            - The name of the private link service.
+        type: str
+    resource_group:
+        description:
+            - The name of the resource group.
+        type: str
+    tags:
+        description:
+            - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+        type: list
+        elements: str
+
+extends_documentation_fragment:
+    - azure.azcollection.azure
+
+author:
+    - xuzhang3 (@xuzhang3)
+    - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get private link service info by name
+  azure_rm_privatelinkservice_info:
+    name: pn-service-name
+    resource_group: myResourceGroup
+
+- name: Get all private link service by resource group
+  azure_rm_privatelinkservice_info:
+    resource_group: myResourceGroup
+
+- name: Get all private link service by subscription filter by tags
+  azure_rm_privatelinkservice_info:
+    tags:
+      - key1
+      - abc
+'''
+
+RETURN = '''
+link_service:
+    description:
+        - List of private link service info.
+    returned: always
+    type: complex
+    contains:
+        id:
+            description:
+                - Resource ID of the private link service.
+            sample: "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateLinkServices/linkservice"
+            returned: always
+            type: str
+        name:
+            description:
+                - Name of the private link service.
+            returned: always
+            type: str
+            sample: linkservice
+        location:
+            description:
+                - Resource location.
+            returned: always
+            type: str
+            sample: eastus
+        alias:
+            description:
+                - The alias of the private link service.
+ type: str + returned: always + sample: "linkservice.6a244dd8-8416-40cf-8c04-52b353bdd507.eastus.azure.privatelinkservice" + auto_approval: + description: + - The auto-approval list of the private link service. + type: dict + returned: always + sample: { "subscriptions": [] } + enable_proxy_protocol: + description: + - Whether the private link service is enabled for proxy protocol or not + type: bool + returned: always + sample: False + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + type: str + returned: always + sample: "f7d60f37-ea2b-4091-8546-1327f35468c4" + type: + description: + - The resource type. + type: str + returned: always + sample: Microsoft.Network/privateLinkServices + visibility: + description: + - The visibility list of the private link service. + type: dict + returned: always + sample: { "subscriptions": [] } + tags: + description: + - The resource tags. + type: dict + returned: always + sample: { 'key1': 'value1' } + provisioning_state: + description: + - The provisioning state of the private link service resource. + type: str + returned: always + sample: Succeeded + network_interfaces: + description: + - An array of references to the network interfaces created for this private link service. + type: list + returned: always + sample: [{ "id": "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testlinkservice.nic.f5"}] + ip_configurations: + description: + - An array of private link service IP configurations. + type: complex + returned: always + contains: + name: + description: + - The name of the IP configurations. + type: str + returned: always + sample: subnetfredprivate-1 + properties: + description: + - The IP configuration properties. + type: complex + returned: always + contains: + primary: + description: + - Whether the ip configuration is primary or not. 
+                            returned: always
+                            type: bool
+                            sample: True
+                        private_ip_address_version:
+                            description:
+                                - Whether the specific IP configuration is IPv4 or IPv6.
+                            returned: always
+                            type: str
+                            sample: IPv4
+                        private_ip_allocation_method:
+                            description:
+                                - The private IP address allocation method.
+                            returned: always
+                            type: str
+                            sample: Dynamic
+                        subnet:
+                            description:
+                                - The reference to the subnet resource.
+                            returned: always
+                            type: dict
+                            sample: { "id": "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnet/subnets/subname" }
+        load_balancer_frontend_ip_configurations:
+            description:
+                - An array of references to the load balancer IP configurations.
+            type: list
+            returned: always
+            sample: [{ "id": "/subscriptions/xxx/resourceGroups/myRG/providers/Microsoft.Network/loadBalancers/testlb/frontendIPConfigurations/front01" }]
+        fqdns:
+            description:
+                - The list of Fqdn.
+            type: list
+            returned: always
+            sample: ['fqdns1.com', 'fqdns2.com']
+        private_endpoint_connections:
+            description:
+                - An array of list about connections to the private endpoint.
+            type: complex
+            returned: always
+            contains:
+                id:
+                    description:
+                        - The ID of the private endpoint connection.
+                    type: str
+                    returned: always
+                    sample: "/subscriptions/xxx/resourceGroups/myReG/providers/Microsoft.Network/privateLinkServices/linkservice/privateEndpointConnections/tes"
+                private_endpoint:
+                    description:
+                        - The ID of the private endpoint.
+ type: str + returned: always + sample: "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/test002" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMPrivateLinkServiceInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type="str"), + resource_group=dict(type="str"), + tags=dict(type='list', elements='str') + ) + + self.name = None + self.tags = None + self.resource_group = None + self.results = dict( + changed=False, + ) + + super(AzureRMPrivateLinkServiceInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + if self.name is not None and self.resource_group is not None: + result = self.get_item() + elif self.resource_group is not None: + result = self.list_resourcegroup() + else: + result = self.list_by_subscription() + + self.results["link_service"] = [item for item in result if item and self.has_tags(item['tags'], self.tags)] + return self.results + + def get_item(self): + self.log("Get properties for {0} in {1}".format(self.name, self.resource_group)) + + try: + response = self.network_client.private_link_services.get(self.resource_group, self.name) + return [self.service_to_dict(response)] + except ResourceNotFoundError: + self.log("Could not get info for {0} in {1}".format(self.name, self.resource_group)) + + return [] + + def list_resourcegroup(self): + result = [] + self.log("List all in {0}".format(self.resource_group)) + try: + response = self.network_client.private_link_services.list(self.resource_group) + while True: + result.append(response.next()) + except StopIteration: + 
pass + except Exception: + pass + return [self.service_to_dict(item) for item in result] + + def list_by_subscription(self): + result = [] + self.log("List all in by subscription") + try: + response = self.network_client.private_link_services.list_by_subscription() + while True: + result.append(response.next()) + except StopIteration: + pass + except Exception: + pass + return [self.service_to_dict(item) for item in result] + + def service_to_dict(self, service_info): + + service = service_info.as_dict() + result = dict( + id=service.get("id"), + name=service.get('name'), + type=service.get('type'), + etag=service.get('etag'), + location=service.get('location'), + tags=service.get('tags'), + load_balancer_frontend_ip_configurations=service.get('load_balancer_frontend_ip_configurations'), + ip_configurations=list(), + network_interfaces=service.get('network_interfaces'), + provisioning_state=service.get('provisioning_state'), + private_endpoint_connections=list(), + visibility=service.get('visibility'), + auto_approval=service.get('auto_approval'), + fqdns=service.get('fqdns'), + alias=service.get('alias'), + enable_proxy_protocol=service.get('enable_proxy_protocol') + ) + if service.get('private_endpoint_connections'): + for items in service['private_endpoint_connections']: + result['private_endpoint_connections'].append({'id': items['id'], 'private_endpoint': items['private_endpoint']['id']}) + + if service.get('ip_configurations'): + for items in service['ip_configurations']: + result['ip_configurations'].append( + { + "name": items['name'], + 'properties': { + "primary": items['primary'], + "private_ip_address_version": items["private_ip_address_version"], + "private_ip_allocation_method": items["private_ip_allocation_method"], + "subnet": items["subnet"], + } + } + ) + + return result + + +def main(): + AzureRMPrivateLinkServiceInfo() + + +if __name__ == "__main__": + main() diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup.py new file mode 100644 index 000000000..da68e776e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup.py @@ -0,0 +1,251 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@techcon65) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_proximityplacementgroup + +version_added: "1.6.0" + +short_description: Create, delete and update proximity placement group + +description: + - Creates, deletes, and updates proximity placement group. + +options: + resource_group: + description: + - Name of resource group. + required: true + type: str + name: + description: + - The name of the proximity placement group. + required: true + type: str + location: + description: + - Valid Azure location for proximity placement group. Defaults to location of resource group. + type: str + state: + description: + - Assert the state of the placement group. Use C(present) to create or update and C(absent) to delete. 
+ default: present + type: str + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Aparna Patil (@techcon65) +''' + +EXAMPLES = ''' +- name: Create a proximity placement group + azure_rm_proximityplacementgroup: + resource_group: myAzureResourceGroup + location: eastus + name: myppg + state: present + +- name: Update proximity placement group + azure_rm_proximityplacementgroup: + resource_group: myAzureResourceGroup + location: eastus + name: myppg + tags: + key1: "value1" + state: present + +- name: Delete a proximity placement group + azure_rm_proximityplacementgroup: + resource_group: myAzureResourceGroup + name: myppg + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the proximity placement group. + returned: always + type: complex + contains: + id: + description: + - The proximity placement group ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Compute/proximityPlacementGroups/myppg" + name: + description: + - The proximity placement group name. + returned: always + type: str + sample: 'myppg' + location: + description: + - The Azure Region where the resource lives. + returned: always + type: str + sample: eastus + proximity_placement_group_type: + description: + - The type of proximity placement group. + returned: always + type: str + sample: Standard + tags: + description: + - Resource tags. + returned: always + type: list + sample: [{"key1": "value1"}] + type: + description: + - The type of resource. 
+ returned: always + type: str + sample: Microsoft.Compute/proximityPlacementGroups +''' + +from ansible.module_utils.basic import _load_params +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE, \ + format_resource_id, normalize_location_name + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMProximityPlacementGroup(AzureRMModuleBase): + + def __init__(self): + + _load_params() + # define user inputs from playbook + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + state=dict(choices=['present', 'absent'], default='present', type='str') + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.tags = None + + super(AzureRMProximityPlacementGroup, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + results = dict() + proximity_placement_group = None + + # retrieve resource group to make sure it exists + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + self.location = normalize_location_name(self.location) + + try: + self.log('Fetching Proximity placement group {0}'.format(self.name)) + proximity_placement_group = self.compute_client.proximity_placement_groups.get(self.resource_group, + self.name) + # serialize object into a dictionary + results = self.ppg_to_dict(proximity_placement_group) + if self.state == 'present': + changed = False + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed 
= True + self.tags = results['tags'] + elif self.state == 'absent': + changed = True + + except ResourceNotFoundError: + if self.state == 'present': + changed = True + else: + changed = False + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + # create or update proximity placement group + proximity_placement_group_new = \ + self.compute_models.ProximityPlacementGroup(location=self.location, + proximity_placement_group_type='Standard') + if self.tags: + proximity_placement_group_new.tags = self.tags + self.results['state'] = self.create_or_update_placementgroup(proximity_placement_group_new) + + elif self.state == 'absent': + # delete proximity placement group + self.delete_placementgroup() + self.results['state'] = 'Deleted' + + return self.results + + def create_or_update_placementgroup(self, proximity_placement_group): + try: + # create the placement group + response = self.compute_client.proximity_placement_groups.create_or_update(resource_group_name=self.resource_group, + proximity_placement_group_name=self.name, + parameters=proximity_placement_group) + except Exception as exc: + self.fail("Error creating or updating proximity placement group {0} - {1}".format(self.name, str(exc))) + return self.ppg_to_dict(response) + + def delete_placementgroup(self): + try: + # delete the placement group + response = self.compute_client.proximity_placement_groups.delete(resource_group_name=self.resource_group, + proximity_placement_group_name=self.name) + except Exception as exc: + self.fail("Error deleting proximity placement group {0} - {1}".format(self.name, str(exc))) + return response + + def ppg_to_dict(self, proximityplacementgroup): + result = proximityplacementgroup.as_dict() + result['tags'] = proximityplacementgroup.tags + return result + + +def main(): + AzureRMProximityPlacementGroup() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup_info.py new file mode 100644 index 000000000..b66808b06 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_proximityplacementgroup_info.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Aparna Patil(@techcon65) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_proximityplacementgroup_info + +version_added: "1.6.0" + +short_description: Get proximity placement group facts + +description: + - Get facts for specified proximity placement group or all proximity placement groups in a given resource group. + +options: + resource_group: + description: + - Name of resource group. + type: str + name: + description: + - The name of the proximity placement group. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Aparna Patil (@techcon65) + +''' + +EXAMPLES = ''' +- name: Get facts for one proximity placement group + azure_rm_proximityplacementgroup_info: + resource_group: myAzureResourceGroup + name: myppg + +- name: Get facts for all proximity placement groups in resource group + azure_rm_proximityplacementgroup_info: + resource_group: myAzureResourceGroup +''' + +RETURN = ''' +proximityplacementgroups: + description: + - Gets a list of proximity placement groups. 
+ returned: always + type: list + elements: dict + sample: [ + { + "availability_sets": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/ + providers/Microsoft.Compute/availabilitySets/availabilityset1" + }, + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/ + providers/Microsoft.Compute/availabilitySets/availabilityset2" + } + ], + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/providers/ + Microsoft.Compute/proximityPlacementGroups/myppg", + "location": "eastus", + "name": "myppg", + "proximity_placement_group_type": "Standard", + "tags": {}, + "virtual_machine_scale_sets": [], + "virtual_machines": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myAzureResourceGroup/ + providers/Microsoft.Compute/virtualMachines/mylinuxvm" + } + ] + } + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'ProximityPlacementGroup' + + +class AzureRMProximityPlacementGroupInfo(AzureRMModuleBase): + + def __init__(self): + + # define user inputs into argument + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + # store the results of the module operation + self.results = dict( + changed=False + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMProximityPlacementGroupInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + results = [] + # list the conditions and results to return based on user input 
+ if self.name is not None: + # if there is a group name provided, return facts about that specific proximity placement group + results = self.get_item() + elif self.resource_group: + # all the proximity placement groups listed in specific resource group + results = self.list_resource_group() + else: + # all the proximity placement groups in a subscription + results = self.list_items() + + self.results['proximityplacementgroups'] = self.curated_items(results) + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + results = [] + # get specific proximity placement group + try: + item = self.compute_client.proximity_placement_groups.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + # serialize result + if item and self.has_tags(item.tags, self.tags): + results = [item] + return results + + def list_resource_group(self): + self.log('List all proximity placement groups for resource group - {0}'.format(self.resource_group)) + try: + response = self.compute_client.proximity_placement_groups.list_by_resource_group(self.resource_group) + except ResourceNotFoundError as exc: + self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def list_items(self): + self.log('List all proximity placement groups for a subscription ') + try: + response = self.compute_client.proximity_placement_groups.list_by_subscription() + except ResourceNotFoundError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(item) + return results + + def curated_items(self, raws): + return [self.ppg_to_dict(item) for item in raws] if raws else [] + + def ppg_to_dict(self, ppg): + result = dict( + id=ppg.id, + name=ppg.name, + 
location=ppg.location, + tags=ppg.tags, + proximity_placement_group_type=ppg.proximity_placement_group_type, + virtual_machines=[dict(id=x.id) for x in ppg.virtual_machines], + virtual_machine_scale_sets=[dict(id=x.id) for x in ppg.virtual_machine_scale_sets], + availability_sets=[dict(id=x.id) for x in ppg.availability_sets] + ) + return result + + +def main(): + AzureRMProximityPlacementGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress.py new file mode 100644 index 000000000..45fc9c807 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_publicipaddress + +version_added: "0.1.0" + +short_description: Manage Azure Public IP Addresses + +description: + - Create, update and delete a Public IP address. + - Allows setting and updating the address allocation method and domain name label. + - Use the M(azure.azcollection.azure_rm_networkinterface) module to associate a Public IP with a network interface. + +options: + resource_group: + description: + - Name of resource group with which the Public IP is associated. + required: true + allocation_method: + description: + - Control whether the assigned Public IP remains permanently assigned to the object. + - If not set to C(Static), the IP address may changed anytime an associated virtual machine is power cycled. 
+ choices: + - dynamic + - static + - Static + - Dynamic + default: dynamic + domain_name: + description: + - The customizable portion of the FQDN assigned to public IP address. This is an explicit setting. + - If no value is provided, any existing value will be removed on an existing public IP. + aliases: + - domain_name_label + name: + description: + - Name of the Public IP. + required: true + state: + description: + - Assert the state of the Public IP. Use C(present) to create or update a and C(absent) to delete. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + sku: + description: + - The public IP address SKU. + - When I(version=ipv6), if I(sku=standard) then set I(allocation_method=static). + - When I(version=ipv4), if I(sku=standard) then set I(allocation_method=static). + choices: + - basic + - standard + - Basic + - Standard + ip_tags: + description: + - List of IpTag associated with the public IP address. + - Each element should contain type:value pair. + suboptions: + type: + description: + - Sets the ip_tags type. + value: + description: + - Sets the ip_tags value. + idle_timeout: + description: + - Idle timeout in minutes. + type: int + version: + description: + - The public IP address version. + choices: + - ipv4 + - ipv6 + default: ipv4 + zones: + description: + - A list of availability zones denoting the IP allocated for the resource needs to come from. 
+ type: list + elements: str + choices: + - '1' + - '2' + - '3' + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = ''' + - name: Create a public ip address + azure_rm_publicipaddress: + resource_group: myResourceGroup + name: my_public_ip + allocation_method: static + domain_name: foobar + + - name: Delete public ip + azure_rm_publicipaddress: + resource_group: myResourceGroup + name: my_public_ip + state: absent +''' + +RETURN = ''' +state: + description: + - Facts about the current state of the object. + returned: always + type: complex + contains: + dns_settings: + description: + - The FQDN of the DNS record associated with the public IP address. + returned: always + type: dict + sample: { + "domain_name_label": "ansible-b57dc95985712e45eb8b9c2e", + "fqdn": "ansible-b57dc95985712e45eb8b9c2e.eastus.cloudapp.azure.com", + "reverse_fqdn": null + } + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: "W/'1905ee13-7623-45b1-bc6b-4a12b2fb9d15'" + idle_timeout_in_minutes: + description: + - The idle timeout of the public IP address. + returned: always + type: int + sample: 4 + ip_address: + description: + - The Public IP Prefix this Public IP Address should be allocated from. + returned: always + type: str + sample: 52.160.103.93 + location: + description: + - Resource location. + returned: always + type: str + example: eastus + name: + description: + - Name of the Public IP Address. + returned: always + type: str + example: publicip002 + provisioning_state: + description: + - The provisioning state of the Public IP resource. + returned: always + type: str + example: Succeeded + public_ip_allocation_method: + description: + - The public IP allocation method. 
+ returned: always + type: str + sample: static + public_ip_address_version: + description: + - The public IP address version. + returned: always + type: str + sample: ipv4 + sku: + description: + - The public IP address SKU. + returned: always + type: str + sample: Basic + tags: + description: + - The resource tags. + returned: always + type: dict + sample: { + "delete": "on-exit", + "testing": "testing" + } + type: + description: + - Type of the resource. + returned: always + type: str + sample: "Microsoft.Network/publicIPAddresses" + zones: + description: + - A list of availability zones denoting the IP allocated for the resource needs to come from. + returned: always + type: list + sample: ['1', '2'] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils._text import to_native + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +def pip_to_dict(pip): + result = dict( + name=pip.name, + type=pip.type, + location=pip.location, + tags=pip.tags, + public_ip_allocation_method=pip.public_ip_allocation_method.lower(), + public_ip_address_version=pip.public_ip_address_version.lower(), + dns_settings=dict(), + ip_address=pip.ip_address, + idle_timeout_in_minutes=pip.idle_timeout_in_minutes, + provisioning_state=pip.provisioning_state, + etag=pip.etag, + sku=pip.sku.name, + zones=pip.zones + ) + if pip.dns_settings: + result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label + result['dns_settings']['fqdn'] = pip.dns_settings.fqdn + result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn + if pip.ip_tags: + result['ip_tags'] = [dict(type=to_native(x.ip_tag_type), value=to_native(x.tag)) for x in pip.ip_tags] + return result + + +ip_tag_spec = dict( + type=dict(type='str', required=True), + value=dict(type='str', required=True) +) + + +class 
AzureRMPublicIPAddress(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']), + allocation_method=dict(type='str', default='dynamic', choices=['Dynamic', 'Static', 'dynamic', 'static']), + domain_name=dict(type='str', aliases=['domain_name_label']), + sku=dict(type='str', choices=['Basic', 'Standard', 'basic', 'standard']), + ip_tags=dict(type='list', elements='dict', options=ip_tag_spec), + idle_timeout=dict(type='int'), + zones=dict(type='list', elements='str', choices=['1', '2', '3']) + ) + + self.resource_group = None + self.name = None + self.location = None + self.state = None + self.tags = None + self.zones = None + self.allocation_method = None + self.domain_name = None + self.sku = None + self.version = None + self.ip_tags = None + self.idle_timeout = None + + self.results = dict( + changed=False, + state=dict() + ) + + super(AzureRMPublicIPAddress, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + results = dict() + changed = False + pip = None + + # capitalize the sku and allocation_method. basic => Basic, Basic => Basic. 
+ self.allocation_method = self.allocation_method.capitalize() if self.allocation_method else None + self.sku = self.sku.capitalize() if self.sku else None + self.version = 'IPv4' if self.version == 'ipv4' else 'IPv6' + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + try: + self.log("Fetch public ip {0}".format(self.name)) + pip = self.network_client.public_ip_addresses.get(self.resource_group, self.name) + self.check_provisioning_state(pip, self.state) + self.log("PIP {0} exists".format(self.name)) + if self.state == 'present': + results = pip_to_dict(pip) + domain_lable = results['dns_settings'].get('domain_name_label') + if self.domain_name is not None and ((self.domain_name or domain_lable) and self.domain_name != domain_lable): + self.log('CHANGED: domain_name_label') + changed = True + results['dns_settings']['domain_name_label'] = self.domain_name + + if self.allocation_method.lower() != results['public_ip_allocation_method'].lower(): + self.log("CHANGED: allocation_method") + changed = True + results['public_ip_allocation_method'] = self.allocation_method + + if self.sku and self.sku != results['sku']: + self.log("CHANGED: sku") + changed = True + results['sku'] = self.sku + + if self.version.lower() != results['public_ip_address_version'].lower(): + self.log("CHANGED: version") + changed = True + results['public_ip_address_version'] = self.version + + if self.idle_timeout and self.idle_timeout != results['idle_timeout_in_minutes']: + self.log("CHANGED: idle_timeout") + changed = True + results['idle_timeout_in_minutes'] = self.idle_timeout + + if self.zones and self.zones != results['zones']: + self.log("Zones defined do not same with existing zones") + changed = False + self.fail("ResourceAvailabilityZonesCannotBeModified: defines is {0}, existing is {1}".format(self.zones, results['zones'])) + + if str(self.ip_tags or []) != 
str(results.get('ip_tags') or []): + self.log("CHANGED: ip_tags") + changed = True + results['ip_tags'] = self.ip_tags + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + elif self.state == 'absent': + self.log("CHANGED: public ip {0} exists but requested state is 'absent'".format(self.name)) + changed = True + except ResourceNotFoundError: + self.log('Public ip {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: pip {0} does not exist but requested state is 'present'".format(self.name)) + changed = True + + self.results['state'] = results + self.results['changed'] = changed + + if self.check_mode: + return results + + if changed: + if self.state == 'present': + if not pip: + self.log("Create new Public IP {0}".format(self.name)) + pip = self.network_models.PublicIPAddress( + location=self.location, + public_ip_address_version=self.version, + public_ip_allocation_method=self.allocation_method, + sku=self.network_models.PublicIPAddressSku(name=self.sku) if self.sku else None, + idle_timeout_in_minutes=self.idle_timeout if self.idle_timeout and self.idle_timeout > 0 else None, + zones=self.zones + ) + if self.ip_tags: + pip.ip_tags = [self.network_models.IpTag(ip_tag_type=x['type'], tag=x['value']) for x in self.ip_tags] + if self.tags: + pip.tags = self.tags + if self.domain_name: + pip.dns_settings = self.network_models.PublicIPAddressDnsSettings( + domain_name_label=self.domain_name + ) + else: + self.log("Update Public IP {0}".format(self.name)) + pip = self.network_models.PublicIPAddress( + location=results['location'], + public_ip_allocation_method=results['public_ip_allocation_method'], + sku=self.network_models.PublicIPAddressSku(name=self.sku) if self.sku else None, + tags=results['tags'], + zones=results['zones'] + ) + if self.domain_name: + pip.dns_settings = self.network_models.PublicIPAddressDnsSettings( + domain_name_label=self.domain_name + ) + 
self.results['state'] = self.create_or_update_pip(pip) + elif self.state == 'absent': + self.log('Delete public ip {0}'.format(self.name)) + self.delete_pip() + + return self.results + + def create_or_update_pip(self, pip): + try: + poller = self.network_client.public_ip_addresses.begin_create_or_update(self.resource_group, self.name, pip) + pip = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating {0} - {1}".format(self.name, str(exc))) + return pip_to_dict(pip) + + def delete_pip(self): + try: + poller = self.network_client.public_ip_addresses.begin_delete(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting {0} - {1}".format(self.name, str(exc))) + # Delete returns nada. If we get here, assume that all is well. + self.results['state']['status'] = 'Deleted' + return True + + +def main(): + AzureRMPublicIPAddress() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress_info.py new file mode 100644 index 000000000..63c670529 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_publicipaddress_info.py @@ -0,0 +1,327 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_publicipaddress_info + +version_added: "0.1.2" + +short_description: Get public IP facts + +description: + - Get facts for a specific public IP or all public IPs within a resource group. + +options: + name: + description: + - Only show results for a specific Public IP. + resource_group: + description: + - Limit results by resource group. 
Required when using name parameter. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = ''' + - name: Get facts for one Public IP + azure_rm_publicipaddress_info: + resource_group: myResourceGroup + name: publicip001 + + - name: Get facts for all Public IPs within a resource groups + azure_rm_publicipaddress_info: + resource_group: myResourceGroup + tags: + - key:value +''' + +RETURN = ''' +azure_publicipaddresses: + description: + - List of public IP address dicts. + - Please note that this option will be deprecated in 2.10 when curated format will become the only supported format. + returned: always + type: list + example: [{ + "etag": 'W/"a31a6d7d-cb18-40a5-b16d-9f4a36c1b18a"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/pip2001", + "location": "eastus2", + "name": "pip2001", + "properties": { + "idleTimeoutInMinutes": 4, + "provisioningState": "Succeeded", + "publicIPAllocationMethod": "Dynamic", + "resourceGuid": "29de82f4-a7da-440e-bd3d-9cabb79af95a" + }, + "type": "Microsoft.Network/publicIPAddresses" + }] +publicipaddresses: + description: + - List of publicipaddress. + - Contains the detail which matches azure_rm_publicipaddress parameters. + - Returned when the format parameter set to curated. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx---xxxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/publicIPAddresses/pipb57dc95224 + name: + description: + - Name of the public IP address. + returned: always + type: str + sample: pipb57dc95224 + type: + description: + - Resource type. 
+ returned: always + type: str + sample: "Microsoft.Network/publicIPAddresses" + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { + "delete": "on-exit", + "testing": "testing" + } + allocation_method: + description: + - The public IP allocation method. + - Possible values are C(static) and C(dynamic). + returned: always + type: str + sample: static + version: + description: + - The public IP address version. + - Possible values are C(ipv4) and C(ipv6). + returned: always + type: str + sample: ipv4 + dns_settings: + description: + - The FQDN of the DNS record associated with the public IP address. + returned: always + type: dict + sample: { + "domain_name_label": "ansible-b57dc95985712e45eb8b9c2e", + "fqdn": "ansible-b57dc95985712e45eb8b9c2e.eastus.cloudapp.azure.com", + "reverse_fqdn": null + } + ip_tags: + description: + - The list of tags associated with the public IP address. + returned: always + type: list + sample: [ + { + "type": "FirstPartyUsage", + "value": "Storage" + } + ] + ip_address: + description: + - The Public IP Prefix this Public IP Address should be allocated from. + returned: always + type: str + sample: 40.121.144.14 + idle_timeout: + description: + - The idle timeout of the public IP address. + returned: always + type: int + sample: 4 + provisioning_state: + description: + - The provisioning state of the PublicIP resource. + - Possible values is C(Succeeded). + returned: always + type: str + sample: Succeeded + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: "W/'1905ee13-7623-45b1-bc6b-4a12b2fb9d15'" + sku: + description: + - The public IP address SKU. + returned: always + type: str + sample: Basic + zones: + description: + - A list of availability zones denoting the IP allocated for the resource needs to come from. 
+ returned: always + type: list + sample: ['1', '2'] +''' +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +AZURE_OBJECT_CLASS = 'PublicIp' + + +class AzureRMPublicIPInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMPublicIPInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_publicipaddress_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_publicipaddress_facts' module has been renamed to 'azure_rm_publicipaddress_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + result = [] + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + result = self.get_item() + elif self.resource_group: + result = self.list_resource_group() + else: + result = self.list_all() + + raw = self.filter(result) + + if is_old_facts: + self.results['ansible_facts'] = { + 'azure_publicipaddresses': self.serialize(raw), + } + self.results['publicipaddresses'] = self.format(raw) + + return self.results + + def format(self, raw): + return [self.pip_to_dict(item) for item in raw] + + def serialize(self, raw): + results = [] + for item in raw: + pip = self.serialize_obj(item, AZURE_OBJECT_CLASS) + pip['name'] = item.name + pip['type'] = item.type + results.append(pip) + return results + + def filter(self, response): + return [item for item in response if 
self.has_tags(item.tags, self.tags)] + + # duplicate with azure_rm_publicipaddress + def pip_to_dict(self, pip): + result = dict( + id=pip.id, + name=pip.name, + type=pip.type, + location=pip.location, + tags=pip.tags, + allocation_method=pip.public_ip_allocation_method.lower(), + version=pip.public_ip_address_version.lower(), + dns_settings=dict(), + ip_tags=dict(), + ip_address=pip.ip_address, + idle_timeout=pip.idle_timeout_in_minutes, + provisioning_state=pip.provisioning_state, + etag=pip.etag, + sku=pip.sku.name, + zones=pip.zones + ) + if pip.dns_settings: + result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label + result['dns_settings']['fqdn'] = pip.dns_settings.fqdn + result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn + if pip.ip_tags: + result['ip_tags'] = [dict(type=x.ip_tag_type, value=x.tag) for x in pip.ip_tags] + return result + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + try: + item = self.network_client.public_ip_addresses.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + return [item] if item else [] + + def list_resource_group(self): + self.log('List items in resource groups') + try: + response = self.network_client.public_ip_addresses.list(self.resource_group) + except ResourceNotFoundError as exc: + self.fail("Error listing items in resource groups {0} - {1}".format(self.resource_group, str(exc))) + return response + + def list_all(self): + self.log('List all items') + try: + response = self.network_client.public_ip_addresses.list_all() + except ResourceNotFoundError as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + return response + + +def main(): + AzureRMPublicIPInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault.py 
new file mode 100644 index 000000000..d793e75bd --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault.py @@ -0,0 +1,310 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Suyeb Ansari (@suyeb786) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = \ + ''' +--- +module: azure_rm_recoveryservicesvault +version_added: '1.1.0' +short_description: Create and Delete Azure Recovery Services vault +description: + - Create or Delete Azure Recovery Services vault. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + name: + description: + - The name of the Azure Recovery Service Vault. + required: true + type: str + location: + description: + - Azure Resource location. + required: true + type: str + state: + description: + - Assert the state of the protection item. + - Use C(present) for Creating Azure Recovery Service Vault. + - Use C(absent) for Deleting Azure Recovery Service Vault. + default: present + type: str + choices: + - present + - absent +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Suyeb Ansari (@suyeb786) +''' + +EXAMPLES = ''' + - name: Create/Update Azure Recovery Service vault + azure_rm_recoveryservicesvault: + resource_group: 'myResourceGroup' + name: 'testVault' + location: 'westeurope' + state: 'present' + - name: Delete Recovery Service Vault + azure_rm_recoveryservicesvault: + resource_group: 'myResourceGroup' + name: 'testVault' + location: 'westeurope' + state: 'absent' +''' + +RETURN = ''' +response: + description: + - The response about the current state of the recovery services vault. + returned: always + type: complex + contains: + etag: + description: + - A unique read-only string that changes whenever the resource create. 
+ returned: always + type: str + sample: "datetime'2020-09-16T02%3A44%3A27.834293Z'" + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxx/resourceGroups/resourcegroup_name/ \ + providers/Microsoft.RecoveryServices/vaults/rev_name" + location: + description: + - The location of the resource. + returned: always + type: str + sample: "eastus" + name: + description: + - Name of the recovery services vault name. + returned: always + type: str + sample: revault_name + properties: + description: + - The recovery service vault properties. + returned: always + type: dict + sample: { + "privateEndpointStateForBackup": "None", + "privateEndpointStateForSiteRecovery": "None", + "provisioningState": "Succeeded" + } + sku: + description: + - The sku type of the recovery service vault. + returned: always + type: str + sample: Standard + type: + description: + - The type of the recovery service vault. + returned: always + type: str + sample: "Microsoft.RecoveryServices/vaults" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +import re +import json +import time + + +class AzureRMRecoveryServicesVault(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.state = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.url = None + self.status_code = [200, 201, 202, 204] + + self.body = {} + self.query_parameters = {} + self.query_parameters['api-version'] = None + self.header_parameters = 
{} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(AzureRMRecoveryServicesVault, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True + ) + + def get_api_version(self): + return '2016-06-01' + + def get_url(self): + if self.state == 'present' or self.state == 'absent': + return '/subscriptions/' \ + + self.subscription_id \ + + '/resourceGroups/' \ + + self.resource_group \ + + '/providers/Microsoft.RecoveryServices' \ + + '/vaults' + '/' \ + + self.name + + def get_body(self): + if self.state == 'present': + return { + "properties": {}, + "sku": { + "name": "Standard" + }, + "location": self.location + } + else: + return {} + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + self.query_parameters['api-version'] = self.get_api_version() + self.url = self.get_url() + self.body = self.get_body() + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + old_response = self.get_resource() + + changed = False + if self.state == 'present': + if old_response is False: + changed = True + response = self.create_recovery_service_vault() + else: + changed = False + response = old_response + if self.state == 'absent': + changed = True + response = self.delete_recovery_service_vault() + + self.results['response'] = response + self.results['changed'] = changed + + return self.results + + def create_recovery_service_vault(self): + # self.log('Creating Recovery Service Vault Name {0}'.format(self.)) + try: + response = self.mgmt_client.query( + self.url, + 'PUT', + self.query_parameters, + self.header_parameters, + self.body, + self.status_code, + 600, + 30, + 
) + except Exception as e: + self.log('Error in creating Azure Recovery Service Vault.') + self.fail('Error in creating Azure Recovery Service Vault {0}'.format(str(e))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + def delete_recovery_service_vault(self): + # self.log('Deleting Recovery Service Vault {0}'.format(self.)) + try: + response = self.mgmt_client.query( + self.url, + 'DELETE', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30, + ) + except Exception as e: + self.log('Error attempting to delete Azure Recovery Service Vault.') + self.fail('Error while deleting Azure Recovery Service Vault: {0}'.format(str(e))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + return response + + def get_resource(self): + # self.log('Get Recovery Service Vault Name {0}'.format(self.)) + found = False + try: + response = self.mgmt_client.query( + self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30, + ) + found = True + except Exception as e: + self.log('Recovery Service Vault Does not exist.') + if found is True: + response = json.loads(response.text) + return response + else: + return False + + +def main(): + AzureRMRecoveryServicesVault() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault_info.py new file mode 100644 index 000000000..e7f2a6192 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_recoveryservicesvault_info.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Suyeb Ansari (@suyeb786) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ 
import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = \ + ''' +--- +module: azure_rm_recoveryservicesvault_info +version_added: '1.1.0' +short_description: Get Azure Recovery Services vault Details +description: + - Get Azure Recovery Services vault Details. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + name: + description: + - The name of the Azure Recovery Service Vault. + required: true + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Suyeb Ansari (@suyeb786) +''' + +EXAMPLES = ''' + - name: Get Azure Recovery Services Vault Details. + azure_rm_recoveryservicesvault_info: + resource_group: 'myResourceGroup' + name: 'testVault' +''' + +RETURN = ''' +response: + description: + - The response about the current state of the recovery services vault. + returned: always + type: complex + contains: + etag: + description: + - A unique read-only string that changes whenever the resource create. + returned: always + type: str + sample: "datetime'2020-09-16T02%3A44%3A27.834293Z'" + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxx/resourceGroups/resourcegroup_name/ \ + providers/Microsoft.RecoveryServices/vaults/rev_name" + location: + description: + - The location of the resource. + returned: always + type: str + sample: "eastus" + name: + description: + - Name of the recovery services vault name. + returned: always + type: str + sample: revault_name + properties: + description: + - The recovery service vault properties. + returned: always + type: dict + sample: { + "privateEndpointStateForBackup": "None", + "privateEndpointStateForSiteRecovery": "None", + "provisioningState": "Succeeded" + } + sku: + description: + - The sku type of the recovery service vault. 
+ returned: always + type: str + sample: Standard + type: + description: + - The type of the recovery service vault. + returned: always + type: str + sample: "Microsoft.RecoveryServices/vaults" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +import re +import json +import time + + +class AzureRMRecoveryServicesVaultInfo(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ) + ) + + self.resource_group = None + self.name = None + + self.body = {} + self.results = dict(changed=False) + self.mgmt_client = None + self.url = None + self.status_code = [200, 201, 202, 204] + + self.query_parameters = {} + self.query_parameters['api-version'] = None + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(AzureRMRecoveryServicesVaultInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True + ) + + def get_api_version(self): + return '2016-06-01' + + def get_url(self): + return '/subscriptions/' \ + + self.subscription_id \ + + '/resourceGroups/' \ + + self.resource_group \ + + '/providers/Microsoft.RecoveryServices' \ + + '/vaults' + '/' \ + + self.name + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + self.query_parameters['api-version'] = self.get_api_version() + self.url = self.get_url() + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + 
base_url=self._cloud_environment.endpoints.resource_manager) + + changed = True + response = self.get_recovery_service_vault_info() + + self.results['response'] = response + self.results['changed'] = changed + + return self.results + + def get_recovery_service_vault_info(self): + # self.log('Get Recovery Service Vault Details {0}'.format(self.)) + try: + response = self.mgmt_client.query( + self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30, + ) + except Exception as e: + self.log('Error in fetching Azure Recovery Service Vault Details.') + self.fail('Error in fetching Azure Recovery Service Vault Details {0}'.format(str(e))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + +def main(): + AzureRMRecoveryServicesVaultInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache.py new file mode 100644 index 000000000..e004eb056 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache.py @@ -0,0 +1,853 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_rediscache +version_added: "0.1.2" +short_description: Manage Azure Cache for Redis instance +description: + - Create, update and delete instance of Azure Cache for Redis. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + name: + description: + - Unique name of the Azure Cache for Redis to create or update. + required: True + location: + description: + - Resource location. 
If not set, location from the resource group will be used as default. + sku: + description: + - SKU info of Azure Cache for Redis. + suboptions: + name: + description: + - Type of Azure Cache for Redis to deploy. + choices: + - basic + - standard + - premium + required: True + size: + description: + - Size of Azure Cache for Redis to deploy. + - When I(sku=basic) or I(sku=standard), allowed values are C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6). + - When I(sku=premium), allowed values are C(P1), C(P2), C(P3), C(P4). + - Please see U(https://docs.microsoft.com/en-us/rest/api/redis/redis/create#sku) for allowed values. + choices: + - C0 + - C1 + - C2 + - C3 + - C4 + - C5 + - C6 + - P1 + - P2 + - P3 + - P4 + required: True + enable_non_ssl_port: + description: + - When set I(enable_non_ssl_port=true), the non-ssl Redis server port 6379 will be enabled. + type: bool + default: false + maxfragmentationmemory_reserved: + description: + - Configures the amount of memory in MB that is reserved to accommodate for memory fragmentation. + - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail. + maxmemory_reserved: + description: + - Configures the amount of memory in MB that is reserved for non-cache operations. + - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail. + maxmemory_policy: + description: + - Configures the eviction policy of the cache. + - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail. + choices: + - volatile_lru + - allkeys_lru + - volatile_random + - allkeys_random + - volatile_ttl + - noeviction + minimum_tls_version: + description: + - Require clients to use a specified TLS version. 
+ type: str + choices: + - "1.0" + - "1.1" + - "1.2" + version_added: "1.10.0" + public_network_access: + description: + - Whether or not public endpoint access is allowed for this cache. + type: str + default: Enabled + choices: + - Enabled + - Disabled + version_added: "1.10.0" + notify_keyspace_events: + description: + - Allows clients to receive notifications when certain events occur. + - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail. + type: str + redis_version: + description: + - The major version of Redis. + type: str + choices: + - "4" + - "6" + default: "6" + version_added: "1.10.0" + shard_count: + description: + - The number of shards to be created when I(sku=premium). + type: int + static_ip: + description: + - Static IP address. Required when deploying an Azure Cache for Redis inside an existing Azure virtual network. + subnet: + description: + - Subnet in a virtual network to deploy the Azure Cache for Redis in. + - It can be resource id of subnet, for example + /subscriptions/{subid}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1. + - It can be a dictionary where contains I(name), I(virtual_network_name) and I(resource_group). + - I(name). Name of the subnet. + - I(resource_group). Resource group name of the subnet. + - I(virtual_network_name). Name of virtual network to which this subnet belongs. + tenant_settings: + description: + - Dict of tenant settings. + type: dict + reboot: + description: + - Reboot specified Redis node(s). There can be potential data loss. + suboptions: + shard_id: + description: + - If clustering is enabled, the id of the shard to be rebooted. + type: int + reboot_type: + description: + - Which Redis node(s) to reboot. + choices: + - primary + - secondary + - all + default: all + regenerate_key: + description: + - Regenerate Redis cache's access keys. 
+ suboptions: + key_type: + description: + - The Redis key to regenerate. + choices: + - primary + - secondary + wait_for_provisioning: + description: + - Wait till the Azure Cache for Redis instance provisioning_state is Succeeded. + - It takes several minutes for Azure Cache for Redis to be provisioned ready for use after creating/updating/rebooting. + - Set this option to C(true) to wait for provisioning_state. Set to C(false) if you don't care about provisioning_state. + - Poll wait timeout is 60 minutes. + type: bool + default: True + state: + description: + - Assert the state of the Azure Cache for Redis. + - Use C(present) to create or update an Azure Cache for Redis and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yunge Zhu(@yungezz) + +''' + +EXAMPLES = ''' +- name: Create an Azure Cache for Redis + azure_rm_rediscache: + resource_group: myResourceGroup + name: myRedis + sku: + name: basic + size: C1 + +- name: Scale up the Azure Cache for Redis + azure_rm_rediscache: + resource_group: myResourceGroup + name: myRedis + sku: + name: standard + size: C1 + tags: + testing: foo + +- name: Force reboot the redis cache + azure_rm_rediscache: + resource_group: myResourceGroup + name: myRedisCache + reboot: + reboot_type: all + +- name: Create Azure Cache for Redis with subnet + azure_rm_rediscache: + resource_group: myResourceGroup + name: myRedis + sku: + name: premium + size: P1 + subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirt + ualNetwork/subnets/mySubnet" + +- name: Regenerate primary Redis key + azure_rm_rediscache: + resource_group: myResourceGroup + name: myRedis + regenerate_key: + key_type: primary +''' + +RETURN = ''' +id: + description: + - Id of the Azure Cache for Redis. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis" +host_name: + description: + - Host name of the Azure Cache for Redis. + returned: when I(state=present) + type: str + sample: "myredis.redis.cache.windows.net" +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.polling import LROPoller + from azure.mgmt.redis import RedisManagementClient + from azure.mgmt.redis.models import ( + RedisCreateParameters, RedisUpdateParameters, Sku, RedisRebootParameters, RedisRegenerateKeyParameters + ) +except ImportError: + # This is handled in azure_rm_common + pass + + +sku_spec = dict( + name=dict( + type='str', + choices=['basic', 'standard', 'premium']), + size=dict( + type='str', + choices=['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'P1', 'P2', 'P3', 'P4'] + ) +) + + +reboot_spec = dict( + shard_id=dict( + type='str' + ), + reboot_type=dict( + type='str', + choices=['primary', 'secondary', 'all'] + ) +) + + +regenerate_key_spec = dict( + key_type=dict( + type='str', + choices=['primary', 'secondary'] + ) +) + + +def rediscache_to_dict(redis): + result = dict( + id=redis.id, + name=redis.name, + location=redis.location, + sku=dict( + name=redis.sku.name.lower(), + size=redis.sku.family + str(redis.sku.capacity) + ), + enable_non_ssl_port=redis.enable_non_ssl_port, + host_name=redis.host_name, + minimum_tls_version=redis.minimum_tls_version, + public_network_access=redis.public_network_access, + redis_version=redis.redis_version, + shard_count=redis.shard_count, + subnet=redis.subnet_id, + static_ip=redis.static_ip, + provisioning_state=redis.provisioning_state, + tenant_settings=redis.tenant_settings, + tags=redis.tags if redis.tags else None + ) + for 
key in redis.redis_configuration: + result[hyphen_to_underline(key)] = hyphen_to_underline(redis.redis_configuration.get(key, None)) + return result + + +def hyphen_to_underline(input): + if input and isinstance(input, str): + return input.replace("-", "_") + return input + + +def underline_to_hyphen(input): + if input and isinstance(input, str): + return input.replace("_", "-") + return input + + +def get_reboot_type(type): + if type == "primary": + return "PrimaryNode" + if type == "secondary": + return "SecondaryNode" + if type == "all": + return "AllNodes" + return type + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMRedisCaches(AzureRMModuleBase): + """Configuration class for an Azure RM Cache for Redis resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + sku=dict( + type='dict', + options=sku_spec + ), + enable_non_ssl_port=dict( + type='bool', + default=False + ), + maxfragmentationmemory_reserved=dict( + type='int' + ), + maxmemory_reserved=dict( + type='int' + ), + maxmemory_policy=dict( + type='str', + choices=[ + "volatile_lru", + "allkeys_lru", + "volatile_random", + "allkeys_random", + "volatile_ttl", + "noeviction" + ] + ), + minimum_tls_version=dict( + type="str", + choices=["1.0", "1.1", "1.2"] + ), + notify_keyspace_events=dict( + type='str', + no_log=True + ), + public_network_access=dict( + type="str", + default="Enabled", + choices=["Enabled", "Disabled"] + ), + redis_version=dict( + type="str", + default="6", + choices=["4", "6"] + ), + shard_count=dict( + type='int' + ), + static_ip=dict( + type='str' + ), + subnet=dict( + type='raw' + ), + tenant_settings=dict( + type='dict' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + reboot=dict( + type='dict', + options=reboot_spec + ), + regenerate_key=dict( + 
type='dict', + no_log=True, + options=regenerate_key_spec + ), + wait_for_provisioning=dict( + type='bool', + default='True' + ) + ) + + self._client = None + + self.resource_group = None + self.name = None + self.location = None + + self.sku = None + self.size = None + self.enable_non_ssl_port = False + self.configuration_file_path = None + self.minimum_tls_version = None + self.public_network_access = None + self.redis_version = None + self.shard_count = None + self.static_ip = None + self.subnet = None + self.tenant_settings = None + self.reboot = None + self.regenerate_key = None + + self.wait_for_provisioning = None + self.wait_for_provisioning_polling_interval_in_seconds = 30 + self.wait_for_provisioning_polling_times = 120 + + self.tags = None + + self.results = dict( + changed=False, + id=None, + host_name=None + ) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMRedisCaches, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + old_response = None + response = None + to_be_updated = False + + # define redis_configuration properties + self.redis_configuration_properties = ["maxfragmentationmemory_reserved", + "maxmemory_reserved", + "maxmemory_policy", + "notify_keyspace_events"] + + # get management client + self._client = self.get_mgmt_svc_client(RedisManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-03-01', + is_track2=True) + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # check subnet exists + if self.subnet: + self.subnet = self.parse_subnet() + + # get existing Azure Cache for Redis + old_response = self.get_rediscache() + + if old_response: + 
self.results['id'] = old_response['id'] + + if self.state == 'present': + # if redis not exists + if not old_response: + self.log("Azure Cache for Redis instance doesn't exist") + + to_be_updated = True + self.to_do = Actions.Create + + if not self.sku: + self.fail("Please specify sku to creating new Azure Cache for Redis.") + + else: + # redis exists already, do update + self.log("Azure Cache for Redis instance already exists") + + update_tags, self.tags = self.update_tags(old_response.get('tags', None)) + + if update_tags: + to_be_updated = True + self.to_do = Actions.Update + + # check if update + if self.check_update(old_response): + to_be_updated = True + self.to_do = Actions.Update + + elif self.state == 'absent': + if old_response: + self.log("Delete Azure Cache for Redis instance") + self.results['id'] = old_response['id'] + to_be_updated = True + self.to_do = Actions.Delete + else: + self.results['changed'] = False + self.log("Azure Cache for Redis {0} not exists.".format(self.name)) + + if to_be_updated: + self.log('Need to Create/Update Azure Cache for Redis') + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.to_do == Actions.Create: + response = self.create_rediscache() + self.results['id'] = response['id'] + self.results['host_name'] = response['host_name'] + + if self.to_do == Actions.Update: + response = self.update_rediscache() + self.results['id'] = response['id'] + self.results['host_name'] = response['host_name'] + + if self.to_do == Actions.Delete: + self.delete_rediscache() + self.log('Azure Cache for Redis instance deleted') + + if self.reboot: + self.reboot['reboot_type'] = get_reboot_type(self.reboot['reboot_type']) + self.force_reboot_rediscache() + self.results['changed'] = True + + if self.regenerate_key: + response = self.regenerate_rediscache_key() + self.results['changed'] = True + self.results['keys'] = response + + return self.results + + def check_update(self, existing): + if 
self.enable_non_ssl_port is not None and existing['enable_non_ssl_port'] != self.enable_non_ssl_port: + self.log("enable_non_ssl_port diff: origin {0} / update {1}".format(existing['enable_non_ssl_port'], self.enable_non_ssl_port)) + return True + if self.sku is not None: + if existing['sku']['name'] != self.sku['name']: + self.log("sku diff: origin {0} / update {1}".format(existing['sku']['name'], self.sku['name'])) + return True + if existing['sku']['size'] != self.sku['size']: + self.log("size diff: origin {0} / update {1}".format(existing['sku']['size'], self.sku['size'])) + return True + if self.tenant_settings is not None and existing['tenant_settings'] != self.tenant_settings: + self.log("tenant_settings diff: origin {0} / update {1}".format(existing['tenant_settings'], self.tenant_settings)) + return True + if self.shard_count is not None and existing['shard_count'] != self.shard_count: + self.log("shard_count diff: origin {0} / update {1}".format(existing['shard_count'], self.shard_count)) + return True + if self.subnet is not None and existing['subnet'] != self.subnet: + self.log("subnet diff: origin {0} / update {1}".format(existing['subnet'], self.subnet)) + return True + if self.static_ip is not None and existing['static_ip'] != self.static_ip: + self.log("static_ip diff: origin {0} / update {1}".format(existing['static_ip'], self.static_ip)) + return True + if self.minimum_tls_version is not None and existing['minimum_tls_version'] != self.minimum_tls_version: + self.log("minimum_tls_version diff: origin {0} / update {1}".format(existing['minimum_tls_version'], self.minimum_tls_version)) + return True + if self.public_network_access is not None and existing['public_network_access'] != self.public_network_access: + self.log("public_network_access diff: origin {0} / update {1}".format(existing['public_network_access'], self.public_network_access)) + return True + if self.redis_version is not None and existing['redis_version'][0] != 
self.redis_version[0]: + self.log("redis_version diff: origin {0} / update {1}".format(existing['redis_version'], self.redis_version)) + return True + for config in self.redis_configuration_properties: + if getattr(self, config) is not None and existing.get(config, None) != getattr(self, config, None): + self.log("redis_configuration {0} diff: origin {1} / update {2}".format(config, existing.get(config, None), getattr(self, config, None))) + return True + return False + + def create_rediscache(self): + ''' + Creates Azure Cache for Redis instance with the specified configuration. + + :return: deserialized Azure Cache for Redis instance state dictionary + ''' + self.log( + "Creating Azure Cache for Redis instance {0}".format(self.name)) + + try: + redis_config = dict() + for key in self.redis_configuration_properties: + if getattr(self, key, None): + redis_config[underline_to_hyphen(key)] = underline_to_hyphen(getattr(self, key)) + + params = RedisCreateParameters( + location=self.location, + sku=Sku(name=self.sku['name'].title(), family=self.sku['size'][0], capacity=self.sku['size'][1:]), + tags=self.tags, + redis_configuration=redis_config, + enable_non_ssl_port=self.enable_non_ssl_port, + tenant_settings=self.tenant_settings, + minimum_tls_version=self.minimum_tls_version, + public_network_access=self.public_network_access, + redis_version=self.redis_version, + shard_count=self.shard_count, + subnet_id=self.subnet, + static_ip=self.static_ip + ) + + response = self._client.redis.begin_create(resource_group_name=self.resource_group, + name=self.name, + parameters=params) + + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + + if self.wait_for_provisioning: + self.wait_for_redis_running() + except Exception as exc: + self.log('Error attempting to create the Azure Cache for Redis instance.') + self.fail( + "Error creating the Azure Cache for Redis instance: {0}".format(str(exc))) + 
return rediscache_to_dict(response) + + def update_rediscache(self): + ''' + Updates Azure Cache for Redis instance with the specified configuration. + + :return: Azure Cache for Redis instance state dictionary + ''' + self.log( + "Updating Azure Cache for Redis instance {0}".format(self.name)) + + try: + redis_config = dict() + for key in self.redis_configuration_properties: + if getattr(self, key, None): + redis_config[underline_to_hyphen(key)] = underline_to_hyphen(getattr(self, key)) + + params = RedisUpdateParameters( + redis_configuration=redis_config, + enable_non_ssl_port=self.enable_non_ssl_port, + tenant_settings=self.tenant_settings, + minimum_tls_version=self.minimum_tls_version, + public_network_access=self.public_network_access, + redis_version=self.redis_version, + shard_count=self.shard_count, + sku=Sku(name=self.sku['name'].title(), family=self.sku['size'][0], capacity=self.sku['size'][1:]), + tags=self.tags + ) + + response = self._client.redis.update(resource_group_name=self.resource_group, + name=self.name, + parameters=params) + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + + if self.wait_for_provisioning: + self.wait_for_redis_running() + + except Exception as exc: + self.log('Error attempting to update the Azure Cache for Redis instance.') + self.fail( + "Error updating the Azure Cache for Redis instance: {0}".format(str(exc))) + return rediscache_to_dict(response) + + def delete_rediscache(self): + ''' + Deletes specified Azure Cache for Redis instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the Azure Cache for Redis instance {0}".format(self.name)) + try: + self._client.redis.begin_delete(resource_group_name=self.resource_group, + name=self.name) + except Exception as e: + self.log('Error attempting to delete the Azure Cache for Redis instance.') + self.fail( + "Error deleting the Azure Cache for Redis instance: {0}".format(str(e))) + return True + + def get_rediscache(self): + ''' + Gets the properties of the specified Azure Cache for Redis instance. + + :return: Azure Cache for Redis instance state dictionary + ''' + self.log("Checking if the Azure Cache for Redis instance {0} is present".format(self.name)) + + response = None + + try: + response = self._client.redis.get(resource_group_name=self.resource_group, + name=self.name) + + self.log("Response : {0}".format(response)) + self.log("Azure Cache for Redis instance : {0} found".format(response.name)) + return rediscache_to_dict(response) + except ResourceNotFoundError: + self.log("Didn't find Azure Cache for Redis {0} in resource group {1}".format( + self.name, self.resource_group)) + + return False + + def force_reboot_rediscache(self): + ''' + Force reboot specified redis cache instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Force reboot the redis cache instance {0}".format(self.name)) + try: + params = RedisRebootParameters( + reboot_type=self.reboot['reboot_type'], + shard_id=self.reboot.get('shard_id'), + ) + response = self._client.redis.force_reboot(resource_group_name=self.resource_group, + name=self.name, + parameters=params) + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + + if self.wait_for_provisioning: + self.wait_for_redis_running() + except Exception as e: + self.log('Error attempting to force reboot the redis cache instance.') + self.fail( + "Error force rebooting the redis cache instance: {0}".format(str(e))) + return True + + def regenerate_rediscache_key(self): + ''' + Regenerate key of redis cache instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Regenerate key of redis cache instance {0}".format(self.name)) + try: + params = RedisRegenerateKeyParameters( + key_type=self.regenerate_key['key_type'].title(), + ) + response = self._client.redis.regenerate_key(resource_group_name=self.resource_group, + name=self.name, + parameters=params) + return response.as_dict() + except Exception as e: + self.log('Error attempting to regenerate key of redis cache instance.') + self.fail( + "Error regenerate key of redis cache instance: {0}".format(str(e))) + + def get_subnet(self): + ''' + Gets the properties of the specified subnet. 
+ + :return: subnet id + ''' + self.log("Checking if the subnet {0} is present".format(self.name)) + + response = None + + try: + response = self.network_client.subnets.get(self.subnet['resource_group'], + self.subnet['virtual_network_name'], + self.subnet['name']) + + self.log("Subnet found : {0}".format(response)) + return response.id + + except ResourceNotFoundError as ex: + self.log("Didn't find subnet {0} in resource group {1}".format( + self.subnet['name'], self.subnet['resource_group'])) + + return False + + def parse_subnet(self): + if isinstance(self.subnet, dict): + if 'virtual_network_name' not in self.subnet or \ + 'name' not in self.subnet: + self.fail("Subnet dict must contains virtual_network_name and name") + if 'resource_group' not in self.subnet: + self.subnet['resource_group'] = self.resource_group + subnet_id = self.get_subnet() + else: + subnet_id = self.subnet + return subnet_id + + def wait_for_redis_running(self): + try: + response = self._client.redis.get(resource_group_name=self.resource_group, name=self.name) + status = response.provisioning_state + polling_times = 0 + + while polling_times < self.wait_for_provisioning_polling_times: + if status.lower() != "succeeded": + polling_times += 1 + time.sleep(self.wait_for_provisioning_polling_interval_in_seconds) + response = self._client.redis.get(resource_group_name=self.resource_group, name=self.name) + status = response.provisioning_state + else: + return True + self.fail("Azure Cache for Redis is not running after 60 mins.") + except Exception as e: + self.fail("Failed to get Azure Cache for Redis: {0}".format(str(e))) + + +def main(): + """Main execution""" + AzureRMRedisCaches() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py new file mode 100644 index 000000000..fec05cd77 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py @@ -0,0 +1,376 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_rediscache_info + +version_added: "0.1.2" + +short_description: Get Azure Cache for Redis instance facts + +description: + - Get facts for Azure Cache for Redis instance. + +options: + resource_group: + description: + - The resource group to search for the desired Azure Cache for Redis. + required: True + name: + description: + - Limit results to a specific Azure Cache for Redis. + return_access_keys: + description: + - Indicate weather to return access keys of the Azure Cache for Redis. + default: False + type: bool + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get Azure Cache for Redis by name + azure_rm_rediscache_info: + resource_group: myResourceGroup + name: myRedis + + - name: Get Azure Cache for Redis with access keys by name + azure_rm_rediscache_info: + resource_group: myResourceGroup + name: myRedis + return_access_keys: true + + - name: Get Azure Cache for Redis in specific resource group + azure_rm_rediscache_info: + resource_group: myResourceGroup +''' + +RETURN = ''' +rediscaches: + description: + - List of Azure Cache for Redis instances. + returned: always + type: complex + contains: + resource_group: + description: + - Name of a resource group where the Azure Cache for Redis belongs to. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Name of the Azure Cache for Redis. 
+ returned: always + type: str + sample: myRedis + id: + description: + - Id of the Azure Cache for Redis. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis + provisioning_state: + description: + - Provisioning state of the Redis cache. + returned: always + type: str + sample: Creating + location: + description: + - Location of the Azure Cache for Redis. + returned: always + type: str + sample: WestUS + enable_non_ssl_port: + description: + - Specifies whether the non-ssl Redis server port (6379) is enabled. + returned: always + type: bool + sample: false + sku: + description: + - Dict of SKU information. + returned: always + type: dict + contains: + name: + description: + - Name of the SKU. + returned: always + type: str + sample: standard + size: + description: + - Size of the Azure Cache for Redis. + returned: always + type: str + sample: C1 + static_ip: + description: + - Static IP address. + returned: always + type: str + sample: 10.75.0.11 + subnet: + description: + - The full resource ID of a subnet in a virtual network to deploy the Azure Cache for Redis in. + returned: always + type: str + sample: + - "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/Microsoft.Network/VirtualNetworks/myVirtualNetwo + rk/subnets/mySubnet" + configuration: + description: + - Dict of Redis configuration. + returned: always + type: dict + sample: maxmeory_reserved + host_name: + description: + - Redis host name. + returned: always + type: str + sample: testRedis.redis.cache.windows.net + minimum_tls_version: + description: + - The version TLS clients at which must connect. + returned: always + type: str + sample: 1.2 + version_added: "1.10.0" + public_network_access: + description: + - Whether or not public endpoint access is allowed for this cache. 
+ returned: always + type: str + sample: Enabled + version_added: "1.10.0" + redis_version: + description: + - The version of Redis. + returned: always + type: str + sample: 6.0.14 + version_added: "1.10.0" + shard_count: + description: + - The number of shards on a Premium Cluster Cache. + returned: always + type: int + sample: 1 + tenant_settings: + description: + - Dict of tenant settings. + returned: always + type: dict + sample: { "key1": "value1" } + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "key1": "value1" } + access_keys: + description: + - Azure Cache for Redis access keys. + type: dict + returned: when I(return_access_keys=true) + contains: + primary: + description: + - The current primary key that clients can use to authenticate the Redis cahce. + returned: always + type: str + sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx= + secondary: + description: + - The current secondary key that clients can use to authenticate the Redis cahce. 
+ returned: always + type: str + sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx= +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.mgmt.redis import RedisManagementClient + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # handled in azure_rm_common + pass + +import re + + +class AzureRMRedisCacheInfo(AzureRMModuleBase): + """Utility class to get Azure Cache for Redis facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict( + type='str', + required=True + ), + return_access_keys=dict( + type='bool', + default=False + ), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + rediscaches=[] + ) + + self.name = None + self.resource_group = None + self.profile_name = None + self.tags = None + + self._client = None + + super(AzureRMRedisCacheInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_rediscache_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_rediscache_facts' module has been renamed to 'azure_rm_rediscache_info'", version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + # get management client + self._client = self.get_mgmt_svc_client(RedisManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-03-01', + is_track2=True) + + if self.name: + self.results['rediscaches'] = self.get_item() + else: + self.results['rediscaches'] = self.list_by_resourcegroup() + + return self.results + + def get_item(self): + """Get a single Azure Cache for Redis""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = 
self._client.redis.get(resource_group_name=self.resource_group, name=self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_rediscache(item)] + + return result + + def list_by_resourcegroup(self): + """Get all Azure Cache for Redis within a resource group""" + + self.log('List all Azure Cache for Redis within a resource group') + + try: + response = self._client.redis.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_rediscache(item)) + + return results + + def list_keys(self): + """List Azure Cache for Redis keys""" + + self.log('List keys for {0}'.format(self.name)) + + item = None + + try: + item = self._client.redis.list_keys(resource_group_name=self.resource_group, name=self.name) + except Exception as exc: + self.fail("Failed to list redis keys of {0} - {1}".format(self.name, str(exc))) + + return item + + def serialize_rediscache(self, rediscache): + ''' + Convert an Azure Cache for Redis object to dict. 
+ :param rediscache: Azure Cache for Redis object + :return: dict + ''' + new_result = dict( + id=rediscache.id, + resource_group=re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', rediscache.id)), + name=rediscache.name, + location=rediscache.location, + provisioning_state=rediscache.provisioning_state, + configuration=rediscache.redis_configuration, + tenant_settings=rediscache.tenant_settings, + minimum_tls_version=rediscache.minimum_tls_version, + public_network_access=rediscache.public_network_access, + redis_version=rediscache.redis_version, + shard_count=rediscache.shard_count, + enable_non_ssl_port=rediscache.enable_non_ssl_port, + static_ip=rediscache.static_ip, + subnet=rediscache.subnet_id, + host_name=rediscache.host_name, + tags=rediscache.tags + ) + + if rediscache.sku: + new_result['sku'] = dict( + name=rediscache.sku.name.lower(), + size=rediscache.sku.family + str(rediscache.sku.capacity) + ) + if self.return_access_keys: + access_keys = self.list_keys() + if access_keys: + new_result['access_keys'] = dict( + primary=access_keys.primary_key, + secondary=access_keys.secondary_key + ) + return new_result + + +def main(): + """Main module execution code path""" + + AzureRMRedisCacheInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscachefirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscachefirewallrule.py new file mode 100644 index 000000000..921f982a6 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscachefirewallrule.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_rediscachefirewallrule +version_added: "0.1.2" +short_description: 
Manage Azure Cache for Redis Firewall rules +description: + - Create, update and delete Azure Cache for Redis Firewall rules. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + cache_name: + description: + - Name of the Azure Cache for Redis. + required: True + name: + description: + - Name of the Firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the Azure Cache for Redis Firewall rule. Must be IPv4 format. + - Required when creating Firewall rule. + end_ip_address: + description: + - The end IP address of the Azure Cache for Redis Firewall rule. Must be IPv4 format. + - Required when creating Firewall rule. + state: + description: + - Assert the state of the Firewall rule of Azure Cache for Redis. + - Use C(present) to create or update Firewall rule of Azure Cache for Redis and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu(@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a Firewall rule for Azure Cache for Redis + azure_rm_rediscachefirewallrule: + resource_group: myResourceGroup + cache_name: myRedisCache + name: myRule + start_ip_address: 192.168.1.1 + end_ip_address: 192.168.1.4 + + - name: Update a Firewall rule for Azure Cache for Redis + azure_rm_rediscachefirewallrule: + resource_group: myResourceGroup + cache_name: myRedisCache + name: myRule + end_ip_address: 192.168.1.5 +''' + +RETURN = ''' +id: + description: + - Id of the Azure Cache for Redis. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/redis/myRedis/firewallRules/myRule" +''' + +import time + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.polling import LROPoller + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.redis import RedisManagementClient + from azure.mgmt.redis.models import ( + RedisFirewallRule + ) +except ImportError: + # This is handled in azure_rm_common + pass + + +def firewall_rule_to_dict(rule): + return dict( + id=rule.id, + name=rule.name, + start_ip_address=rule.start_ip, + end_ip_address=rule.end_ip, + type=rule.type + ) + + +class Actions: + NoAction, CreateUpdate, Delete = range(3) + + +class AzureRMRedisCacheFirewallRule(AzureRMModuleBase): + """Configuration class for an Azure RM Cache for Redis Firewall Rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + cache_name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self._client = None + + self.resource_group = None + self.name = None + self.cache_name = None + + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict( + changed=False, + id=None + ) + self.state = None + + self.to_do = Actions.NoAction + + super(AzureRMRedisCacheFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, 
kwargs[key]) + + old_response = None + response = None + + # get management client + self._client = self.get_mgmt_svc_client(RedisManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2018-03-01', + is_track2=True) + + # check if the firewall rule exists + old_response = self.get() + + if old_response: + self.results['id'] = old_response['id'] + + if self.state == 'present': + # if firewall rule not exists + if not old_response: + self.log("Firewall Rule of Azure Cache for Redis doesn't exist") + + self.to_do = Actions.CreateUpdate + + else: + # redis exists already, do update + self.log("Firewall Rule of Azure Cache for Redis already exists") + + if self.start_ip_address is None: + self.start_ip_address = old_response['start_ip_address'] + if self.end_ip_address is None: + self.end_ip_address = old_response['end_ip_address'] + + # check if update + if self.check_update(old_response): + self.to_do = Actions.CreateUpdate + + elif self.state == 'absent': + if old_response: + self.log("Delete Firewall Rule of Azure Cache for Redis") + self.results['id'] = old_response['id'] + self.to_do = Actions.Delete + else: + self.results['changed'] = False + self.log("Azure Cache for Redis {0} doesn't exist.".format(self.name)) + + if self.to_do == Actions.CreateUpdate: + self.log('Need to Create/Update Firewall rule of Azure Cache for Redis') + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_or_update() + self.results['id'] = response['id'] + + if self.to_do == Actions.Delete: + self.log('Delete Firewall rule of Azure Cache for Redis') + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete() + self.log('Firewall rule of Azure Cache for Redis deleted') + + return self.results + + def check_update(self, existing): + if self.start_ip_address and self.start_ip_address != existing['start_ip_address']: + self.log("start_ip_address diff: origin {0} / 
update {1}".format(existing['start_ip_address'], self.start_ip_address)) + return True + if self.end_ip_address and self.end_ip_address != existing['end_ip_address']: + self.log("end_ip_address diff: origin {0} / update {1}".format(existing['end_ip_address'], self.end_ip_address)) + return True + return False + + def create_or_update(self): + ''' + Creates Firewall rule of Azure Cache for Redis with the specified configuration. + + :return: deserialized Firewall rule of Azure Cache for Redis state dictionary + ''' + self.log( + "Creating Firewall rule of Azure Cache for Redis {0}".format(self.name)) + + try: + params = RedisFirewallRule( + name=self.name, + start_ip=self.start_ip_address, + end_ip=self.end_ip_address, + ) + response = self._client.firewall_rules.create_or_update(resource_group_name=self.resource_group, + cache_name=self.cache_name, + rule_name=self.name, + parameters=params) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create/update Firewall rule of Azure Cache for Redis.') + self.fail( + "Error creating/updating Firewall rule of Azure Cache for Redis: {0}".format(str(exc))) + return firewall_rule_to_dict(response) + + def delete(self): + ''' + Deletes specified Firewall rule of Azure Cache for Redis in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the Firewall rule of Azure Cache for Redis {0}".format(self.name)) + try: + self._client.firewall_rules.delete(resource_group_name=self.resource_group, + rule_name=self.name, + cache_name=self.cache_name) + except Exception as e: + self.log('Error attempting to delete the Firewall rule of Azure Cache for Redis.') + self.fail( + "Error deleting the Firewall rule of Azure Cache for Redis: {0}".format(str(e))) + return True + + def get(self): + ''' + Gets the properties of the specified Firewall rule of Azure Cache for Redis. + + :return: Azure Cache for Redis Firewall Rule instance state dictionary + ''' + self.log("Checking if the Firewall Rule {0} is present".format(self.name)) + + response = None + + try: + response = self._client.firewall_rules.get(resource_group_name=self.resource_group, + rule_name=self.name, + cache_name=self.cache_name) + + self.log("Response : {0}".format(response)) + self.log("Redis Firewall Rule : {0} found".format(response.name)) + return firewall_rule_to_dict(response) + + except ResourceNotFoundError: + self.log("Didn't find Azure Redis Firewall rule {0} in resource group {1}".format( + self.name, self.resource_group)) + + return False + + +def main(): + """Main execution""" + AzureRMRedisCacheFirewallRule() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment.py new file mode 100644 index 000000000..b895d8883 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- 
+module: azure_rm_registrationassignment +version_added: '1.3.0' +short_description: Manage Azure RegistrationAssignment instance +description: + - Create and delete instance of Azure RegistrationAssignment. +options: + scope: + description: + - Scope of the registration assignment. Can be in subscription or group level. + required: true + type: str + registration_assignment_id: + description: + - ID of the registration assignment. + - If is not specified, an UUID will be generated for it. + type: str + properties: + description: + - Properties of a registration assignment. + type: dict + suboptions: + registration_definition_id: + description: + - Fully qualified path of the registration definition. + required: true + type: str + state: + description: + - Assert the state of the RegistrationAssignment. + - Use C(present) to create or update an RegistrationAssignment and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + +''' + +EXAMPLES = ''' + - name: Delete Registration Assignment + azure_rm_registrationassignment: + scope: subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + registration_assignment_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: absent + + + - name: Create Registration Assignment in subscription level + azure_rm_registrationassignment: + scope: subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + registration_assignment_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + properties: + registration_definition_id: /subscriptions/xxx-xxx/providers/Microsoft.ManagedServices/registrationDefinitions/xxx-xxx + + + - name: Create Registration Assignment in resourcegroup level with randomly generating registration_assignment_id + azure_rm_registrationassignment: + scope: subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup + properties: + 
registration_definition_id: /subscriptions/xxx-xxx/providers/Microsoft.ManagedServices/registrationDefinitions/xxx-xxx + +''' + +RETURN = ''' +state: + description: + - The state info of the registration assignment. + type: complex + returned: always + contains: + properties: + description: + - Properties of a registration assignment. + returned: always + type: complex + contains: + registration_definition_id: + description: + - Fully qualified path of the registration definition. + returned: always + type: str + sample: null + id: + description: + - The fully qualified path of the registration assignment. + returned: always + type: str + sample: /subscriptions/xxx-xxx/providers/Microsoft.ManagedServices/registrationAssignments/xxx-xxx + type: + description: + - Type of the resource. + returned: always + type: str + sample: Microsoft.ManagedServices/registrationAssignments + name: + description: + - Name of the registration assignment. + returned: always + type: str + sample: 9b2895ec-fb1e-4a1e-a978-abd9933d6b20 + +''' +import uuid +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from azure.mgmt.managedservices import ManagedServicesClient + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMRegistrationAssignment(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + scope=dict( + type='str', + required=True + ), + registration_assignment_id=dict( + type='str', + ), + properties=dict( + type='dict', + disposition='/properties', + options=dict( + registration_definition_id=dict( + type='str', + disposition='registration_definition_id', + required=True + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.scope = 
None + self.registration_assignment_id = None + self.expand_registration_definition = False + self.body = {} + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMRegistrationAssignment, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + old_response = None + response = None + if self.registration_assignment_id is None: + self.registration_assignment_id = str(uuid.uuid4()) + + self.mgmt_client = self.get_mgmt_svc_client(ManagedServicesClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2019-09-01', + is_track2=True, + suppress_subscription_id=True) + + old_response = self.get_resource() + + if not old_response: + if self.state == 'present': + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource() + self.results['state'] = response + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_resource() + else: + self.results['changed'] = False + response = old_response + self.results['state'] = response + + if self.state == 'present': + if 
self.results['state'].get('properties', None) is not None: + registration_definition_id = self.results['state']['properties']['registration_definition_id'] + self.results['state']['properties'].clear() + self.results['state']['properties']['registration_definition_id'] = registration_definition_id + + return self.results + + def create_update_resource(self): + try: + response = self.mgmt_client.registration_assignments.begin_create_or_update(scope=self.scope, + registration_assignment_id=self.registration_assignment_id, + request_body=self.body) + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the RegistrationAssignment instance.') + self.fail('Error creating the RegistrationAssignment instance: {0}'.format(str(exc))) + return response.as_dict() + + def delete_resource(self): + try: + response = self.mgmt_client.registration_assignments.begin_delete(scope=self.scope, + registration_assignment_id=self.registration_assignment_id) + except Exception as e: + self.log('Error attempting to delete the RegistrationAssignment instance.') + self.fail('Error deleting the RegistrationAssignment instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + try: + response = self.mgmt_client.registration_assignments.get(scope=self.scope, + registration_assignment_id=self.registration_assignment_id, + expand_registration_definition=self.expand_registration_definition) + except Exception as e: + return False + return response.as_dict() + + +def main(): + AzureRMRegistrationAssignment() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment_info.py new file mode 100644 index 000000000..60003a192 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationassignment_info.py @@ -0,0 +1,189 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_registrationassignment_info +version_added: '1.3.0' +short_description: Get RegistrationAssignment info +description: + - Get info of RegistrationAssignment. +options: + scope: + description: + - Scope of the registration assignment. + required: true + type: str + registration_assignment_id: + description: + - ID of the registration assignment. + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + +''' + +EXAMPLES = ''' + - name: Get Registration Assignment + azure_rm_registrationassignment_info: + registration_assignment_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + scope: subscription/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup + + + - name: Get All Registration Assignments in scope(subscription) + azure_rm_registrationassignment_info: + scope: subscription/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + +''' + +RETURN = ''' +registration_assignments: + description: + - A list of dict results where the key is the name of the RegistrationAssignment. + - The values are the facts for that RegistrationAssignment. + returned: always + type: complex + contains: + properties: + description: + - Properties of a registration assignment. + returned: always + type: complex + contains: + registration_definition_id: + description: + - Fully qualified path of the registration definition. 
+ returned: always + type: str + sample: /subscriptions/xxx-xxx/providers/Microsoft.ManagedServices/registrationDefinitions/xxx-xxx + id: + description: + - The fully qualified path of the registration assignment. + returned: always + type: str + sample: /subscriptions/xxx-xxxf/providers/Microsoft.ManagedServices/registrationAssignments/xxx-xxx + type: + description: + - Type of the resource. + returned: always + type: str + sample: Microsoft.ManagedServices/registrationAssignment + name: + description: + - Name of the registration assignment. + returned: always + type: str + sample: 9b2895ec-fb1e-4a1e-a978-abd9933d6b20 +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.managedservices import ManagedServicesClient + from msrestazure.azure_operation import AzureOperationPoller + from msrest.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMRegistrationAssignmentInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + scope=dict( + type='str', + required=True + ), + registration_assignment_id=dict( + type='str' + ) + ) + + self.scope = None + self.registration_assignment_id = None + self.expand_registration_definition = False + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + + self.mgmt_client = None + super(AzureRMRegistrationAssignmentInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + self.mgmt_client = self.get_mgmt_svc_client(ManagedServicesClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2020-09-01', + is_track2=True, + suppress_subscription_id=True) + + if (self.scope is 
not None and self.registration_assignment_id is not None): + self.results['registration_assignments'] = self.format_item(self.get()) + elif (self.scope is not None): + self.results['registration_assignments'] = self.format_item(self.list()) + + if len(self.results['registration_assignments']) > 0: + for item in self.results['registration_assignments']: + if item.get('properties', None) is not None: + registration_definition_id = item['properties']['registration_definition_id'] + item['properties'].clear() + item['properties']['registration_definition_id'] = registration_definition_id + return self.results + + def get(self): + response = None + + try: + response = self.mgmt_client.registration_assignments.get(scope=self.scope, + registration_assignment_id=self.registration_assignment_id, + expand_registration_definition=self.expand_registration_definition) + except Exception as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list(self): + response = None + + try: + response = self.mgmt_client.registration_assignments.list(scope=self.scope, + expand_registration_definition=self.expand_registration_definition) + except Exception as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def format_item(self, item): + if hasattr(item, 'as_dict'): + return [item.as_dict()] + else: + result = [] + items = list(item) + for tmp in items: + result.append(tmp.as_dict()) + return result + + +def main(): + AzureRMRegistrationAssignmentInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition.py new file mode 100644 index 000000000..9daaf9980 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# +# Copyright 
(c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_registrationdefinition +version_added: '1.3.0' +short_description: Manage Azure RegistrationDefinition instance +description: + - Create, update and delete instance of Azure RegistrationDefinition. +options: + registration_definition_id: + description: + - ID of the registration definition. + - If is not specified, an UUID will be generated for it. + type: str + scope: + description: + - The subscription ID defines the subscription in which the registration definition will be created. + - If not specified, will use the subscription derived from AzureRMAuth. + type: str + properties: + description: + - Properties of a registration definition. + type: dict + suboptions: + description: + description: + - Description of the registration definition. + type: str + authorizations: + description: + - Authorization tuple containing principal ID of the user/security group or service principal and ID of the build-in role. + required: true + type: list + elements: dict + suboptions: + principal_id: + description: + - Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription. + required: true + type: str + role_definition_id: + description: + - The role definition identifier. + - This role will define all the permissions that the security group/service principal/user must have on the projected subscription. + - This role cannot be an owner role. + required: true + type: str + registration_definition_name: + description: + - Name of the registration definition. + type: str + managed_by_tenant_id: + description: + - ID of the managedBy tenant. + required: true + type: str + plan: + description: + - Plan details for the managed services. 
+ type: dict + suboptions: + name: + description: + - The plan name. + required: true + type: str + publisher: + description: + - The publisher ID. + required: true + type: str + product: + description: + - The product code. + required: true + type: str + version: + description: + - The plan's version. + required: true + type: str + state: + description: + - Assert the state of the RegistrationDefinition. + - Use C(present) to create or update an RegistrationDefinition and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + +''' + +EXAMPLES = ''' + - name: Create Registration Definition without scope + azure_rm_registrationdefinition: + registration_definition_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + properties: + description: test + authorizations: + - principal_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + role_definition_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + managed_by_tenant_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + registration_definition_name: def4 + + - name: Create Registration Definition with scope + azure_rm_registrationdefinition: + scope: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + registration_definition_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + properties: + description: test + authorizations: + - principal_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + role_definition_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + managed_by_tenant_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + registration_definition_name: def5 + + - name: Delete Registration Definition + azure_rm_registrationdefinition: + registration_definition_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: absent + +''' + +RETURN = ''' +state: + description: + - The state info of the registration assignment. + type: complex + returned: always + contains: + properties: + description: + - Properties of a registration definition. 
+ returned: always + type: complex + contains: + description: + description: + - Description of the registration definition. + returned: always + type: str + sample: test + authorizations: + description: + - Authorization tuple containing principal ID of the user/security group or service principal and ID of the build-in role. + returned: always + type: complex + contains: + principal_id: + description: + - Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription + returned: always + type: str + sample: 99e3227f-8701-4099-869f-bc3efc7f1e64 + role_definition_id: + description: + - The role definition identifier. + - This role will define all the permissions that the security group/service principal/user must have on the subscription. + - This role cannot be an owner role. + returned: always + type: str + sample: b24988ac-6180-42a0-ab88-20f7382dd24c + registration_definition_name: + description: + - Name of the registration definition. + returned: always + type: str + sample: null + managed_by_tenant_id: + description: + - ID of the managedBy tenant. + returned: always + type: str + sample: null + plan: + description: + - Plan details for the managed services. + returned: always + type: complex + contains: + name: + description: + - The plan name. + returned: always + type: str + sample: null + publisher: + description: + - The publisher ID. + returned: always + type: str + sample: null + product: + description: + - The product code. + returned: always + type: str + sample: null + version: + description: + - The plan's version. + returned: always + type: str + sample: null + id: + description: + - Fully qualified path of the registration definition. + returned: always + type: str + sample: null + type: + description: + - Type of the resource. + returned: always + type: str + sample: Microsoft.ManagedServices/registrationDefinitions + name: + description: + - Name of the registration definition. 
+ returned: always + type: str + sample: /subscriptions/xxx-xxx/providers/Microsoft.ManagedServices/registrationDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +''' +import uuid +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from azure.mgmt.managedservices import ManagedServicesClient + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMRegistrationDefinition(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + scope=dict( + type='str' + ), + registration_definition_id=dict( + type='str', + ), + properties=dict( + type='dict', + disposition='/properties', + options=dict( + description=dict( + type='str', + disposition='description' + ), + authorizations=dict( + type='list', + disposition='authorizations', + required=True, + elements='dict', + options=dict( + principal_id=dict( + type='str', + disposition='principal_id', + required=True + ), + role_definition_id=dict( + type='str', + disposition='role_definition_id', + required=True + ) + ) + ), + registration_definition_name=dict( + type='str', + disposition='registration_definition_name' + ), + managed_by_tenant_id=dict( + type='str', + disposition='managed_by_tenant_id', + required=True + ) + ) + ), + plan=dict( + type='dict', + disposition='/plan', + options=dict( + name=dict( + type='str', + disposition='name', + required=True + ), + publisher=dict( + type='str', + disposition='publisher', + required=True + ), + product=dict( + type='str', + disposition='product', + required=True + ), + version=dict( + type='str', + disposition='version', + required=True + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.scope = None + self.registration_definition_id 
= None + self.body = {} + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMRegistrationDefinition, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + if self.registration_definition_id is None: + self.registration_definition_id = str(uuid.uuid4()) + + if not self.scope: + self.scope = "/subscriptions/" + self.subscription_id + else: + self.scope = "/subscriptions/" + self.scope + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(ManagedServicesClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2019-09-01', + is_track2=True, + suppress_subscription_id=True) + + old_response = self.get_resource() + + if not old_response: + if self.state == 'present': + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource() + self.results['state'] = response + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_resource() + else: + self.results['changed'] = False + response = old_response + + return self.results + + def 
create_update_resource(self): + + try: + response = self.mgmt_client.registration_definitions.begin_create_or_update( + registration_definition_id=self.registration_definition_id, + scope=self.scope, + request_body=self.body) + + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the RegistrationDefinition instance.') + self.fail('Error creating the RegistrationDefinition instance: {0}'.format(str(exc))) + return response.as_dict() + + def delete_resource(self): + try: + response = self.mgmt_client.registration_definitions.delete(registration_definition_id=self.registration_definition_id, + scope=self.scope) + except Exception as e: + self.log('Error attempting to delete the RegistrationDefinition instance.') + self.fail('Error deleting the RegistrationDefinition instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + try: + response = self.mgmt_client.registration_definitions.get(scope=self.scope, + registration_definition_id=self.registration_definition_id) + except Exception as e: + return False + return response.as_dict() + + +def main(): + AzureRMRegistrationDefinition() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition_info.py new file mode 100644 index 000000000..3c8fabe95 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_registrationdefinition_info.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: 
azure_rm_registrationdefinition_info +version_added: '1.3.0' +short_description: Get RegistrationDefinition info +description: + - Get info of RegistrationDefinition. +options: + scope: + description: + - The subscription ID defines the subscription in which the registration definition will be retrieved. + - If not specified, will use the subscription derived from AzureRMAuth. + type: str + registration_definition_id: + description: + - ID of the registration definition. + type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) +''' + +EXAMPLES = ''' + - name: Get Registration Definition + azure_rm_registrationdefinition_info: + registration_definition_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Get All Registration Definitions from AzureRMAuth's subscription + azure_rm_registrationdefinition_info: + + - name: Get All Registration Definitions in the subscription levle + azure_rm_registrationdefinition_info: + scope: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + +''' + +RETURN = ''' +registration_definitions: + description: + - A list of dict results where the key is the name of the RegistrationDefinition and the values are the facts for that RegistrationDefinition. + returned: always + type: complex + contains: + properties: + description: + - Properties of a registration definition. + returned: always + type: complex + contains: + description: + description: + - Description of the registration definition. + returned: always + type: str + sample: null + authorizations: + description: + - Authorization tuple containing principal ID of the user/security group or service principal and id of the build-in role. + returned: always + type: complex + contains: + principal_id: + description: + - Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription. 
+ returned: always + type: str + sample: null + role_definition_id: + description: + - The role definition identifier. + - The role will define all the permissions that the security group/service principal/user must have on the subscription. + - The role cannot be an owner role. + returned: always + type: str + sample: null + registration_definition_name: + description: + - Name of the registration definition. + returned: always + type: str + sample: null + managed_by_tenant_id: + description: + - ID of the managedBy tenant. + returned: always + type: str + sample: null + plan: + description: + - Plan details for the managed services. + returned: always + type: complex + contains: + name: + description: + - The plan name. + returned: always + type: str + sample: null + publisher: + description: + - The publisher ID. + returned: always + type: str + sample: null + product: + description: + - The product code. + returned: always + type: str + sample: null + version: + description: + - The plan's version. + returned: always + type: str + sample: null + id: + description: + - Fully qualified path of the registration definition. + returned: always + type: str + sample: null + type: + description: + - Type of the resource. + returned: always + type: str + sample: null + name: + description: + - Name of the registration definition. 
+ returned: always + type: str + sample: null +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.managedservices import ManagedServicesClient + from msrestazure.azure_operation import AzureOperationPoller + from msrest.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMRegistrationDefinitionInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + scope=dict( + type='str' + ), + registration_definition_id=dict( + type='str' + ) + ) + + self.scope = None + self.registration_definition_id = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + + self.mgmt_client = None + super(AzureRMRegistrationDefinitionInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if not self.scope: + self.scope = "/subscriptions/" + self.subscription_id + else: + self.scope = "/subscriptions/" + self.scope + + self.mgmt_client = self.get_mgmt_svc_client(ManagedServicesClient, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2019-09-01', + is_track2=True, + suppress_subscription_id=True) + + if self.registration_definition_id is not None: + self.results['registration_definitions'] = self.format_item(self.get()) + else: + self.results['registration_definitions'] = self.format_item(self.list()) + return self.results + + def get(self): + response = None + + try: + response = self.mgmt_client.registration_definitions.get(scope=self.scope, + registration_definition_id=self.registration_definition_id) + except Exception as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + 
def list(self): + response = None + + try: + response = self.mgmt_client.registration_definitions.list(scope=self.scope) + except Exception as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def format_item(self, item): + if hasattr(item, 'as_dict'): + return [item.as_dict()] + else: + result = [] + items = list(item) + for tmp in items: + result.append(tmp.as_dict()) + return result + + +def main(): + AzureRMRegistrationDefinitionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource.py new file mode 100644 index 000000000..0927128cd --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource.py @@ -0,0 +1,420 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_resource +version_added: "0.1.2" +short_description: Create any Azure resource +description: + - Create, update or delete any Azure resource using Azure REST API. + - This module gives access to resources that are not supported via Ansible modules. + - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API. + +options: + url: + description: + - Azure RM Resource URL. + api_version: + description: + - Specific API version to be used. + provider: + description: + - Provider type. + - Required if URL is not specified. + resource_group: + description: + - Resource group to be used. + - Required if URL is not specified. + resource_type: + description: + - Resource type. + - Required if URL is not specified. + resource_name: + description: + - Resource name. 
+ - Required if URL Is not specified. + subresource: + description: + - List of subresources. + suboptions: + namespace: + description: + - Subresource namespace. + type: + description: + - Subresource type. + name: + description: + - Subresource name. + body: + description: + - The body of the HTTP request/response to the web service. + method: + description: + - The HTTP method of the request or response. It must be uppercase. + choices: + - GET + - PUT + - POST + - HEAD + - PATCH + - DELETE + - MERGE + default: "PUT" + status_code: + description: + - A valid, numeric, HTTP status code that signifies success of the request. Can also be comma separated list of status codes. + type: list + default: [ 200, 201, 202 ] + idempotency: + description: + - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body). + default: no + type: bool + polling_timeout: + description: + - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body). + default: 0 + type: int + polling_interval: + description: + - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body). + default: 60 + type: int + state: + description: + - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Update scaleset info using azure_rm_resource + azure_rm_resource: + resource_group: myResourceGroup + provider: compute + resource_type: virtualmachinescalesets + resource_name: myVmss + api_version: "2017-12-01" + body: { body } +''' + +RETURN = ''' +response: + description: + - Response specific to resource type. + returned: always + type: complex + contains: + id: + description: + - Resource ID. 
+ type: str + returned: always + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183" + kind: + description: + - The kind of storage. + type: str + returned: always + sample: Storage + location: + description: + - The resource location, defaults to location of the resource group. + type: str + returned: always + sample: eastus + name: + description: + The storage account name. + type: str + returned: always + sample: staccb57dc95183 + properties: + description: + - The storage account's related properties. + type: dict + returned: always + sample: { + "creationTime": "2019-06-13T06:34:33.0996676Z", + "encryption": { + "keySource": "Microsoft.Storage", + "services": { + "blob": { + "enabled": true, + "lastEnabledTime": "2019-06-13T06:34:33.1934074Z" + }, + "file": { + "enabled": true, + "lastEnabledTime": "2019-06-13T06:34:33.1934074Z" + } + } + }, + "networkAcls": { + "bypass": "AzureServices", + "defaultAction": "Allow", + "ipRules": [], + "virtualNetworkRules": [] + }, + "primaryEndpoints": { + "blob": "https://staccb57dc95183.blob.core.windows.net/", + "file": "https://staccb57dc95183.file.core.windows.net/", + "queue": "https://staccb57dc95183.queue.core.windows.net/", + "table": "https://staccb57dc95183.table.core.windows.net/" + }, + "primaryLocation": "eastus", + "provisioningState": "Succeeded", + "secondaryLocation": "westus", + "statusOfPrimary": "available", + "statusOfSecondary": "available", + "supportsHttpsTrafficOnly": false + } + sku: + description: + - The storage account SKU. + type: dict + returned: always + sample: { + "name": "Standard_GRS", + "tier": "Standard" + } + tags: + description: + - Resource tags. + type: dict + returned: always + sample: { 'key1': 'value1' } + type: + description: + - The resource type. 
+ type: str + returned: always + sample: "Microsoft.Storage/storageAccounts" + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible.module_utils.common.dict_transformations import dict_merge + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.service_client import ServiceClient + from msrestazure.tools import resource_id, is_valid_resource_id + import json + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMResource(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + url=dict( + type='str' + ), + provider=dict( + type='str', + ), + resource_group=dict( + type='str', + ), + resource_type=dict( + type='str', + ), + resource_name=dict( + type='str', + ), + subresource=dict( + type='list', + default=[] + ), + api_version=dict( + type='str' + ), + method=dict( + type='str', + default='PUT', + choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"] + ), + body=dict( + type='raw' + ), + status_code=dict( + type='list', + default=[200, 201, 202] + ), + idempotency=dict( + type='bool', + default=False + ), + polling_timeout=dict( + type='int', + default=0 + ), + polling_interval=dict( + type='int', + default=60 + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + # store the results of the module operation + self.results = dict( + changed=False, + response=None + ) + self.mgmt_client = None + self.url = None + self.api_version = None + self.provider = None + self.resource_group = None + self.resource_type = None + self.resource_name = None + self.subresource_type = None + self.subresource_name = None + self.subresource = [] + self.method = None + self.status_code = [] + self.idempotency = False + self.polling_timeout = 
None + self.polling_interval = None + self.state = None + self.body = None + super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.state == 'absent': + self.method = 'DELETE' + self.status_code.append(204) + + if self.url is None: + orphan = None + rargs = dict() + rargs['subscription'] = self.subscription_id + rargs['resource_group'] = self.resource_group + if not (self.provider is None or self.provider.lower().startswith('.microsoft')): + rargs['namespace'] = "Microsoft." + self.provider + else: + rargs['namespace'] = self.provider + + if self.resource_type is not None and self.resource_name is not None: + rargs['type'] = self.resource_type + rargs['name'] = self.resource_name + for i in range(len(self.subresource)): + resource_ns = self.subresource[i].get('namespace', None) + resource_type = self.subresource[i].get('type', None) + resource_name = self.subresource[i].get('name', None) + if resource_type is not None and resource_name is not None: + rargs['child_namespace_' + str(i + 1)] = resource_ns + rargs['child_type_' + str(i + 1)] = resource_type + rargs['child_name_' + str(i + 1)] = resource_name + else: + orphan = resource_type + else: + orphan = self.resource_type + + self.url = resource_id(**rargs) + + if orphan is not None: + self.url += '/' + orphan + + # if api_version was not specified, get latest one + if not self.api_version: + try: + # extract provider and resource type + if "/providers/" in self.url: + provider = self.url.split("/providers/")[1].split("/")[0] + resourceType = self.url.split(provider + "/")[1].split("/")[0] + url = "/subscriptions/" + self.subscription_id + "/providers/" + provider + api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': 
'2015-01-01'}, None, None, [200], 0, 0).text) + for rt in api_versions['resourceTypes']: + if rt['resourceType'].lower() == resourceType.lower(): + self.api_version = rt['apiVersions'][0] + break + else: + # if there's no provider in API version, assume Microsoft.Resources + self.api_version = '2018-05-01' + if not self.api_version: + self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType)) + except Exception as exc: + self.fail("Failed to obtain API version: {0}".format(str(exc))) + + query_parameters = {} + query_parameters['api-version'] = self.api_version + + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + needs_update = True + response = None + + if self.idempotency: + original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0) + + if original.status_code == 404: + if self.state == 'absent': + needs_update = False + else: + try: + response = json.loads(original.text) + needs_update = (dict_merge(response, self.body) != response) + except Exception: + pass + + if needs_update: + response = self.mgmt_client.query(self.url, + self.method, + query_parameters, + header_parameters, + self.body, + self.status_code, + self.polling_timeout, + self.polling_interval) + if self.state == 'present': + try: + response = json.loads(response.text) + except Exception: + response = response.text + else: + response = None + + self.results['response'] = response + self.results['changed'] = needs_update + + return self.results + + +def main(): + AzureRMResource() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource_info.py new file mode 100644 index 000000000..d1af99ca2 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resource_info.py @@ -0,0 +1,443 @@ +#!/usr/bin/python +# +# 
Copyright (c) 2018 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_resource_info +version_added: "0.1.2" +short_description: Generic facts of Azure resources +description: + - Obtain facts of any resource using Azure REST API. + - This module gives access to resources that are not supported via Ansible modules. + - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API. + +options: + url: + description: + - Azure RM Resource URL. + api_version: + description: + - Specific API version to be used. + provider: + description: + - Provider type, should be specified in no URL is given. + resource_group: + description: + - Resource group to be used. + - Required if URL is not specified. + resource_type: + description: + - Resource type. + resource_name: + description: + - Resource name. + method: + description: + - The HTTP method of the request or response. It must be uppercase. + choices: + - GET + - PUT + - POST + - HEAD + - PATCH + - DELETE + - MERGE + default: "GET" + subresource: + description: + - List of subresources. + suboptions: + namespace: + description: + - Subresource namespace. + type: + description: + - Subresource type. + name: + description: + - Subresource name. 
+ +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get scaleset info + azure_rm_resource_info: + resource_group: myResourceGroup + provider: compute + resource_type: virtualmachinescalesets + resource_name: myVmss + api_version: "2017-12-01" + + - name: Query all the resources in the resource group + azure_rm_resource_info: + resource_group: "{{ resource_group }}" + resource_type: resources +''' + +RETURN = ''' +response: + description: + - Response specific to resource type. + returned: always + type: complex + contains: + id: + description: + - Id of the Azure resource. + type: str + returned: always + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM" + location: + description: + - Resource location. + type: str + returned: always + sample: eastus + name: + description: + - Resource name. + type: str + returned: always + sample: myVM + properties: + description: + - Specifies the virtual machine's property. + type: complex + returned: always + contains: + diagnosticsProfile: + description: + - Specifies the boot diagnostic settings state. + type: complex + returned: always + contains: + bootDiagnostics: + description: + - A debugging feature, which to view Console Output and Screenshot to diagnose VM status. + type: dict + returned: always + sample: { + "enabled": true, + "storageUri": "https://vxisurgdiag.blob.core.windows.net/" + } + hardwareProfile: + description: + - Specifies the hardware settings for the virtual machine. + type: dict + returned: always + sample: { + "vmSize": "Standard_D2s_v3" + } + networkProfile: + description: + - Specifies the network interfaces of the virtual machine. + type: complex + returned: always + contains: + networkInterfaces: + description: + - Describes a network interface reference. 
+ type: list + returned: always + sample: + - { + "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441" + } + osProfile: + description: + - Specifies the operating system settings for the virtual machine. + type: complex + returned: always + contains: + adminUsername: + description: + - Specifies the name of the administrator account. + type: str + returned: always + sample: azureuser + allowExtensionOperations: + description: + - Specifies whether extension operations should be allowed on the virtual machine. + - This may only be set to False when no extensions are present on the virtual machine. + type: bool + returned: always + sample: true + computerName: + description: + - Specifies the host OS name of the virtual machine. + type: str + returned: always + sample: myVM + requireGuestProvisionSignale: + description: + - Specifies the host require guest provision signal or not. + type: bool + returned: always + sample: true + secrets: + description: + - Specifies set of certificates that should be installed onto the virtual machine. + type: list + returned: always + sample: [] + linuxConfiguration: + description: + - Specifies the Linux operating system settings on the virtual machine. + type: dict + returned: when OS type is Linux + sample: { + "disablePasswordAuthentication": false, + "provisionVMAgent": true + } + provisioningState: + description: + - The provisioning state. + type: str + returned: always + sample: Succeeded + vmID: + description: + - Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure laaS VMs SMBIOS. + - It can be read using platform BIOS commands. + type: str + returned: always + sample: "eb86d9bb-6725-4787-a487-2e497d5b340c" + storageProfile: + description: + - Specifies the storage account type for the managed disk. 
+ type: complex + returned: always + contains: + dataDisks: + description: + - Specifies the parameters that are used to add a data disk to virtual machine. + type: list + returned: always + sample: + - { + "caching": "None", + "createOption": "Attach", + "diskSizeGB": 1023, + "lun": 2, + "managedDisk": { + "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2", + "storageAccountType": "StandardSSD_LRS" + }, + "name": "testdisk2" + } + - { + "caching": "None", + "createOption": "Attach", + "diskSizeGB": 1023, + "lun": 1, + "managedDisk": { + "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3", + "storageAccountType": "StandardSSD_LRS" + }, + "name": "testdisk3" + } + + imageReference: + description: + - Specifies information about the image to use. + type: dict + returned: always + sample: { + "offer": "UbuntuServer", + "publisher": "Canonical", + "sku": "18.04-LTS", + "version": "latest" + } + osDisk: + description: + - Specifies information about the operating system disk used by the virtual machine. + type: dict + returned: always + sample: { + "caching": "ReadWrite", + "createOption": "FromImage", + "diskSizeGB": 30, + "managedDisk": { + "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx", + "storageAccountType": "Premium_LRS" + }, + "name": "myVM_disk1_xxx", + "osType": "Linux" + } + type: + description: + - The type of identity used for the virtual machine. 
+ type: str + returned: always + sample: "Microsoft.Compute/virtualMachines" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient + +try: + from msrestazure.azure_exceptions import CloudError + from msrest.service_client import ServiceClient + from msrestazure.tools import resource_id, is_valid_resource_id + import json + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMResourceInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + url=dict( + type='str' + ), + provider=dict( + type='str' + ), + resource_group=dict( + type='str' + ), + resource_type=dict( + type='str' + ), + resource_name=dict( + type='str' + ), + subresource=dict( + type='list', + default=[] + ), + method=dict( + type='str', + default='GET', + choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"] + ), + api_version=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + response=[] + ) + self.mgmt_client = None + self.url = None + self.api_version = None + self.provider = None + self.resource_group = None + self.resource_type = None + self.resource_name = None + self.subresource = [] + super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_resource_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + if self.url is None: + orphan = None + 
rargs = dict() + rargs['subscription'] = self.subscription_id + rargs['resource_group'] = self.resource_group + if not (self.provider is None or self.provider.lower().startswith('.microsoft')): + rargs['namespace'] = "Microsoft." + self.provider + else: + rargs['namespace'] = self.provider + + if self.resource_type is not None and self.resource_name is not None: + rargs['type'] = self.resource_type + rargs['name'] = self.resource_name + for i in range(len(self.subresource)): + resource_ns = self.subresource[i].get('namespace', None) + resource_type = self.subresource[i].get('type', None) + resource_name = self.subresource[i].get('name', None) + if resource_type is not None and resource_name is not None: + rargs['child_namespace_' + str(i + 1)] = resource_ns + rargs['child_type_' + str(i + 1)] = resource_type + rargs['child_name_' + str(i + 1)] = resource_name + else: + orphan = resource_type + else: + orphan = self.resource_type + + self.url = resource_id(**rargs) + + if orphan is not None: + self.url += '/' + orphan + + # if api_version was not specified, get latest one + if not self.api_version: + try: + # extract provider and resource type + if "/providers/" in self.url: + provider = self.url.split("/providers/")[1].split("/")[0] + resourceType = self.url.split(provider + "/")[1].split("/")[0] + url = "/subscriptions/" + self.subscription_id + "/providers/" + provider + api_versions = json.loads(self.mgmt_client.query(url, self.method, {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text) + for rt in api_versions['resourceTypes']: + if rt['resourceType'].lower() == resourceType.lower(): + self.api_version = rt['apiVersions'][0] + break + else: + # if there's no provider in API version, assume Microsoft.Resources + self.api_version = '2018-05-01' + if not self.api_version: + self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType)) + except Exception as exc: + self.fail("Failed to obtain API version: {0}".format(str(exc))) + + 
self.results['url'] = self.url + + query_parameters = {} + query_parameters['api-version'] = self.api_version + + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + skiptoken = None + + while True: + if skiptoken: + query_parameters['skiptoken'] = skiptoken + response = self.mgmt_client.query(self.url, self.method, query_parameters, header_parameters, None, [200, 404], 0, 0) + try: + response = json.loads(response.text) + if isinstance(response, dict): + if response.get('value'): + self.results['response'] = self.results['response'] + response['value'] + skiptoken = response.get('nextLink') + else: + self.results['response'] = self.results['response'] + [response] + except Exception as e: + self.fail('Failed to parse response: ' + str(e)) + if not skiptoken: + break + return self.results + + +def main(): + AzureRMResourceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup.py new file mode 100644 index 000000000..553330ab1 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_resourcegroup +version_added: "0.1.2" +short_description: Manage Azure resource groups +description: + - Create, update and delete a resource group. +options: + force_delete_nonempty: + description: + - Remove a resource group and all associated resources. + - Use with I(state=absent) to delete a resource group that contains resources. 
+ type: bool + aliases: + - force + default: 'no' + location: + description: + - Azure location for the resource group. Required when creating a new resource group. + - Cannot be changed once resource group is created. + name: + description: + - Name of the resource group. + required: true + state: + description: + - Assert the state of the resource group. Use C(present) to create or update and C(absent) to delete. + - When C(absent) a resource group containing resources will not be removed unless the I(force) option is used. + default: present + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Create a resource group + azure_rm_resourcegroup: + name: myResourceGroup + location: westus + tags: + testing: testing + delete: never + + - name: Delete a resource group + azure_rm_resourcegroup: + name: myResourceGroup + state: absent + + - name: Delete a resource group including resources it contains + azure_rm_resourcegroup: + name: myResourceGroup + force_delete_nonempty: yes + state: absent +''' +RETURN = ''' +contains_resources: + description: + - Whether or not the resource group contains associated resources. + returned: always + type: bool + sample: True +state: + description: + - Current state of the resource group. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup" + location: + description: + - Azure location for the resource group. + returned: always + type: str + sample: westus + name: + description: + - The resource group name. + returned: always + type: str + sample: Testing + provisioning_state: + description: + - Provisioning state of the resource group. 
+ returned: always + type: str + sample: Succeeded + tags: + description: + - The resource group's tags. + returned: always + type: dict + sample: { + "delete": "on-exit", + "testing": "no" + } +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name + + +def resource_group_to_dict(rg): + return dict( + id=rg.id, + name=rg.name, + location=rg.location, + tags=rg.tags, + provisioning_state=rg.properties.provisioning_state + ) + + +class AzureRMResourceGroup(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + force_delete_nonempty=dict(type='bool', default=False, aliases=['force']) + ) + + self.name = None + self.state = None + self.location = None + self.tags = None + self.force_delete_nonempty = None + + self.results = dict( + changed=False, + contains_resources=False, + state=dict(), + ) + + super(AzureRMResourceGroup, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + results = dict() + changed = False + rg = None + contains_resources = False + + try: + self.log('Fetching resource group {0}'.format(self.name)) + rg = self.rm_client.resource_groups.get(self.name) + self.check_provisioning_state(rg, self.state) + contains_resources = self.resources_exist() + + results = resource_group_to_dict(rg) + if self.state == 'absent': + self.log("CHANGED: resource group {0} exists but requested state is 'absent'".format(self.name)) + changed = True + elif self.state == 'present': + update_tags, results['tags'] = self.update_tags(results['tags']) + self.log("update 
tags %s" % update_tags) + self.log("new tags: %s" % str(results['tags'])) + if update_tags: + changed = True + + if self.location and normalize_location_name(self.location) != results['location']: + self.fail("Resource group '{0}' already exists in location '{1}' and cannot be " + "moved.".format(self.name, results['location'])) + except ResourceNotFoundError: + self.log('Resource group {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: resource group {0} does not exist but requested state is " + "'present'".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['state'] = results + self.results['contains_resources'] = contains_resources + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not rg: + # Create resource group + self.log("Creating resource group {0}".format(self.name)) + if not self.location: + self.fail("Parameter error: location is required when creating a resource group.") + if self.name_exists(): + self.fail("Error: a resource group with the name {0} already exists in your subscription." + .format(self.name)) + params = self.rm_models.ResourceGroup( + location=self.location, + tags=self.tags + ) + else: + # Update resource group + params = self.rm_models.ResourceGroup( + location=results['location'], + tags=results['tags'] + ) + self.results['state'] = self.create_or_update_resource_group(params) + elif self.state == 'absent': + if contains_resources and not self.force_delete_nonempty: + self.fail("Error removing resource group {0}. Resources exist within the group. " + "Use `force_delete_nonempty` to force delete. 
" + "To list resources under {0}, use `azure_rm_resourcegroup_facts` module with `list_resources` option.".format(self.name)) + self.delete_resource_group() + + return self.results + + def create_or_update_resource_group(self, params): + try: + result = self.rm_client.resource_groups.create_or_update(self.name, params) + except Exception as exc: + self.fail("Error creating or updating resource group {0} - {1}".format(self.name, str(exc))) + return resource_group_to_dict(result) + + def delete_resource_group(self): + try: + poller = self.rm_client.resource_groups.begin_delete(self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error delete resource group {0} - {1}".format(self.name, str(exc))) + + # The delete operation doesn't return anything. + # If we got here, assume all is good + self.results['state']['status'] = 'Deleted' + return True + + def resources_exist(self): + found = False + try: + response = self.rm_client.resources.list_by_resource_group(self.name) + except AttributeError: + response = self.rm_client.resource_groups.list_resources(self.name) + except Exception as exc: + self.fail("Error checking for resource existence in {0} - {1}".format(self.name, str(exc))) + + for item in response: + found = True + break + return found + + def name_exists(self): + try: + exists = self.rm_client.resource_groups.check_existence(self.name) + except Exception as exc: + self.fail("Error checking for existence of name {0} - {1}".format(self.name, str(exc))) + return exists + + +def main(): + AzureRMResourceGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup_info.py new file mode 100644 index 000000000..969143a17 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_resourcegroup_info.py @@ -0,0 +1,235 @@ +#!/usr/bin/python +# +# 
Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_resourcegroup_info + +version_added: "0.1.2" + +short_description: Get resource group facts + +description: + - Get facts for a specific resource group or all resource groups. + +options: + name: + description: + - Limit results to a specific resource group. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + list_resources: + description: + - List all resources under the resource group. + - Note this will cost network overhead for each resource group. Suggest use this when I(name) set. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Get facts for one resource group + azure_rm_resourcegroup_info: + name: myResourceGroup + + - name: Get facts for all resource groups + azure_rm_resourcegroup_info: + + - name: Get facts by tags + azure_rm_resourcegroup_info: + tags: + - testing + - foo:bar + + - name: Get facts for one resource group including resources it contains + azure_rm_resourcegroup_info: + name: myResourceGroup + list_resources: yes +''' +RETURN = ''' +resourcegroups: + description: + - List of resource group dicts. + returned: always + type: list + contains: + id: + description: + - Resource id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup" + name: + description: + - Resource group name. + returned: always + type: str + sample: foo + tags: + description: + - Tags assigned to resource group. 
+ returned: always + type: dict + sample: { "tag": "value" } + resources: + description: + - List of resources under the resource group. + returned: when I(list_resources=yes). + type: list + contains: + id: + description: + - Resource id. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMa + chines/myVirtualMachine" + name: + description: + - Resource name. + returned: always + type: str + sample: myVirtualMachine + location: + description: + - Resource region. + returned: always + type: str + sample: eastus + type: + description: + - Resource type. + returned: always + type: str + sample: "Microsoft.Compute/virtualMachines" + tags: + description: + - Tags to assign to the managed disk. + returned: always + type: dict + sample: { "tag": "value" } +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +AZURE_OBJECT_CLASS = 'ResourceGroup' + + +class AzureRMResourceGroupInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + tags=dict(type='list', elements='str'), + list_resources=dict(type='bool') + ) + + self.results = dict( + changed=False, + resourcegroups=[] + ) + + self.name = None + self.tags = None + self.list_resources = None + + super(AzureRMResourceGroupInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_resourcegroup_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_resourcegroup_facts' module has been renamed to 'azure_rm_resourcegroup_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if 
self.name: + result = self.get_item() + else: + result = self.list_items() + + if self.list_resources: + for item in result: + item['resources'] = self.list_by_rg(item['name']) + + if is_old_facts: + self.results['ansible_facts']['azure_resourcegroups'] = result + self.results['resourcegroups'] = result + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.rm_client.resource_groups.get(self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] + + return result + + def list_items(self): + self.log('List all items') + try: + response = self.rm_client.resource_groups.list() + except Exception as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + return results + + def list_by_rg(self, name): + self.log('List resources under resource group') + results = [] + try: + response = self.rm_client.resources.list_by_resource_group(name) + while True: + results.append(response.next().as_dict()) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when listing resources under resource group {0}: {1}'.format(name, exc.message or str(exc))) + return results + + +def main(): + AzureRMResourceGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment.py new file mode 100644 index 000000000..6a930cc4b --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment.py @@ -0,0 +1,373 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Paul Aiton, (@paultaiton) +# Copyright (c) 2018 Yunge Zhu, (@yungezz) 
+# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_roleassignment +version_added: "0.1.2" +short_description: Manage Azure Role Assignment +description: + - Create and delete instance of Azure Role Assignment. + +options: + assignee_object_id: + description: + - The object id of assignee. This maps to the ID inside the Active Directory. + - It can point to a user, service principal or security group. + - Required when creating role assignment. + aliases: + - assignee + id: + description: + - Fully qualified id of assignment to delete or create. + - Mutually Exclusive with I(scope) and I(name) + name: + description: + - Unique name of role assignment. + - The role assignment name must be a GUID, sample as "3ce0cbb0-58c4-4e6d-a16d-99d86a78b3ca". + - Mutually Exclusive with I(id) + role_definition_id: + description: + - The role definition id used in the role assignment. + - Required when creating role assignment. + scope: + description: + - The scope of the role assignment to create. + - For example, use /subscriptions/{subscription-id}/ for subscription. + - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name} for resource group. + - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name} for resource. + - Mutually Exclusive with I(id) + state: + description: + - Assert the state of the role assignment. + - Use C(present) to create or update a role assignment and C(absent) to delete it. 
+ - If C(present), then I(role_definition_id) and I(assignee_object_id) are both required + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu(@yungezz) + - Paul Aiton(@paultaiton) + +''' + +EXAMPLES = ''' + - name: Create a role assignment + azure_rm_roleassignment: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + assignee_object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + role_definition_id: + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + + - name: Create a role assignment + azure_rm_roleassignment: + name: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + assignee_object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + role_definition_id: + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + + - name: Delete a role assignment + azure_rm_roleassignment: + name: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + state: absent + + - name: Delete a role assignment + azure_rm_roleassignment: + id: /subscriptions/xxx-sub-guid-xxx/resourceGroups/rgname/providers/Microsoft.Authorization/roleAssignments/xxx-assign-guid-xxx" + state: absent + + - name: Delete a role assignment + azure_rm_roleassignment: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + assignee_object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + role_definition_id: + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + state: absent +''' + +RETURN = ''' +id: + description: + - Id of current role assignment. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +name: + description: + - Name of role assignment. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +type: + description: + - Type of role assignment. + type: str + returned: always + sample: Microsoft.Authorization/roleAssignments +assignee_object_id: + description: + - Principal Id of the role assignee. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +principal_type: + description: + - Principal type of the role assigned to. + type: str + returned: always + sample: ServicePrincipal +role_definition_id: + description: + - Role definition id that was assigned to principal_id. + type: str + returned: always + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +scope: + description: + - The role assignment scope. 
+ type: str + returned: always + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +try: + import uuid + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMRoleAssignment(AzureRMModuleBase): + """Configuration class for an Azure RM Role Assignment""" + + def __init__(self): + self.module_arg_spec = dict( + assignee_object_id=dict(type='str', aliases=['assignee']), + id=dict(type='str'), + name=dict(type='str'), + role_definition_id=dict(type='str'), + scope=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + + self.assignee_object_id = None + self.id = None + self.name = None + self.role_definition_id = None + self.scope = None + self.state = None + + self.results = dict( + changed=False, + id=None, + ) + + mutually_exclusive = [['name', 'id'], ['scope', 'id']] + required_one_of = [['scope', 'id']] + required_if = [ + ["state", "present", ["assignee_object_id", "role_definition_id"]] + ] + + super(AzureRMRoleAssignment, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + required_one_of=required_one_of, + required_if=required_if, + mutually_exclusive=mutually_exclusive) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.scope: + self.fail("Parameter Error: setting name requires a scope to also be set.") + + existing_assignment = None + response = None + + existing_assignment = self.get_roleassignment() + + if existing_assignment: + self.set_results(existing_assignment) + + if self.state == 'present': + # check if the role assignment exists + if not existing_assignment: + self.log("Role assignment doesn't exist in this scope") + + self.results['changed'] = True + + if self.check_mode: + 
return self.results + response = self.create_roleassignment() + self.set_results(response) + + else: + self.log("Role assignment already exists, not updatable") + self.log('Result: {0}'.format(existing_assignment)) + + elif self.state == 'absent': + if existing_assignment: + self.log("Delete role assignment") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_roleassignment(existing_assignment.get('id')) + + self.log('role assignment deleted') + + else: + # If assignment doesn't exist, that's the desired state. + self.log("role assignment {0} does not exist.".format(self.name)) + + return self.results + + def create_roleassignment(self): + ''' + Creates role assignment. + + :return: deserialized role assignment + ''' + self.log("Creating role assignment {0}".format(self.name)) + + response = None + try: + # pylint: disable=missing-kwoa + parameters = self.authorization_models.RoleAssignmentCreateParameters(role_definition_id=self.role_definition_id, + principal_id=self.assignee_object_id) + if self.id: + response = self.authorization_client.role_assignments.create_by_id(role_id=self.id, + parameters=parameters) + elif self.scope: + if not self.name: + self.name = str(uuid.uuid4()) + response = self.authorization_client.role_assignments.create(scope=self.scope, + role_assignment_name=self.name, + parameters=parameters) + + except Exception as exc: + self.log('Error attempting to create role assignment.') + self.fail("Error creating role assignment: {0}".format(str(exc))) + return self.roleassignment_to_dict(response) + + def delete_roleassignment(self, assignment_id): + ''' + Deletes specified role assignment. 
+ + :return: True + ''' + self.log("Deleting the role assignment {0}".format(self.name)) + try: + response = self.authorization_client.role_assignments.delete_by_id(role_id=assignment_id) + except Exception as e: + self.log('Error attempting to delete the role assignment.') + self.fail("Error deleting the role assignment: {0}".format(str(e))) + + return True + + def get_roleassignment(self): + ''' + Gets the properties of the specified role assignment. + + :return: deserialized role assignment dictionary + ''' + self.log("Checking if the role assignment {0} is present".format(self.name)) + + role_assignment = None + + if self.id: + try: + response = self.authorization_client.role_assignments.get_by_id(role_id=self.id) + role_assignment = self.roleassignment_to_dict(response) + if role_assignment and self.assignee_object_id and role_assignment.get('assignee_object_id') != self.assignee_object_id: + self.fail('State Mismatch Error: The assignment ID exists, but does not match the provided assignee.') + + if role_assignment and self.role_definition_id and (role_assignment.get('role_definition_id').split('/')[-1].lower() + != self.role_definition_id.split('/')[-1].lower()): + self.fail('State Mismatch Error: The assignment ID exists, but does not match the provided role.') + + except Exception as ex: + self.log("Didn't find role assignments id {0}".format(self.id)) + + elif self.name and self.scope: + try: + response = self.authorization_client.role_assignments.get(scope=self.scope, role_assignment_name=self.name) + role_assignment = self.roleassignment_to_dict(response) + if role_assignment and self.assignee_object_id and role_assignment.get('assignee_object_id') != self.assignee_object_id: + self.fail('State Mismatch Error: The assignment name exists, but does not match the provided assignee.') + + if role_assignment and self.role_definition_id and (role_assignment.get('role_definition_id').split('/')[-1].lower() + != self.role_definition_id.split('/')[-1].lower()): 
+ self.fail('State Mismatch Error: The assignment name exists, but does not match the provided role.') + + except Exception as ex: + self.log("Didn't find role assignment by name {0} at scope {1}".format(self.name, self.scope)) + + else: + try: + if self.scope and self.assignee_object_id and self.role_definition_id: + response = list(self.authorization_client.role_assignments.list()) + response = [self.roleassignment_to_dict(role_assignment) for role_assignment in response] + response = [role_assignment for role_assignment in response if role_assignment.get('scope') == self.scope] + response = [role_assignment for role_assignment in response if role_assignment.get('assignee_object_id') == self.assignee_object_id] + response = [role_assignment for role_assignment in response if (role_assignment.get('role_definition_id').split('/')[-1].lower() + == self.role_definition_id.split('/')[-1].lower())] + else: + self.fail('If id or name are not supplied, then assignee_object_id and role_definition_id are required.') + if response: + role_assignment = response[0] + except Exception as ex: + self.log("Didn't find role assignments for subscription {0}".format(self.subscription_id)) + + return role_assignment + + def set_results(self, assignment): + self.results['id'] = assignment.get('id') + self.results['name'] = assignment.get('name') + self.results['type'] = assignment.get('type') + self.results['assignee_object_id'] = assignment.get('assignee_object_id') + self.results['principal_type'] = assignment.get('principal_type') + self.results['role_definition_id'] = assignment.get('role_definition_id') + self.results['scope'] = assignment.get('scope') + + def roleassignment_to_dict(self, assignment): + return dict( + assignee_object_id=assignment.principal_id, + id=assignment.id, + name=assignment.name, + principal_id=assignment.principal_id, + principal_type=assignment.principal_type, + role_definition_id=assignment.role_definition_id, + scope=assignment.scope, + 
type=assignment.type + ) + + +def main(): + """Main execution""" + AzureRMRoleAssignment() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment_info.py new file mode 100644 index 000000000..4fa338a81 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roleassignment_info.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Paul Aiton, (@paultaiton) +# Copyright (c) 2019 Yunge Zhu, (@yungezz) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_roleassignment_info +version_added: "0.1.2" +short_description: Gets Azure Role Assignment facts +description: + - Gets facts of Azure Role Assignment. + +options: + assignee: + description: + - Object id of a user, group or service principal. + - Mutually exclusive with I(name) and I(id). + aliases: + - assignee_object_id + id: + description: + - Fqid of role assignment to look up. + - If set, I(role_definition_id) and I(scope) will be silently ignored. + - Mutually exclusive with I(assignee) and I(name). + name: + description: + - Name of role assignment. + - Requires that I(scope) also be set. + - Mutual exclusive with I(assignee) and I(id). + role_definition_id: + description: + - Resource id of role definition. + scope: + description: + - The scope to query for role assignments. + - For example, use /subscriptions/{subscription-id}/ for a subscription. + - /subscriptions/{subscription-id}/resourceGroups/{resourcegroup-name} for a resource group. + - /subscriptions/{subscription-id}/resourceGroups/{resourcegroup-name}/providers/{resource-provider}/{resource-type}/{resource-name} for a resource. 
+ - By default will return all inhereted assignments from parent scopes, see I(strict_scope_match). + strict_scope_match: + description: + - If strict_scope_match is True, role assignments will only be returned for the exact scope defined. + - Inherited role assignments will be excluded from results. + - Option will be silently ignored if no scope is provided. + type: bool + default: False + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu(@yungezz) + - Paul Aiton(@paultaiton) + +''' + +EXAMPLES = ''' + - name: Get role assignments for specific service principal + azure_rm_roleassignment_info: + assignee: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Get role assignments for specific scope that matches specific role definition + azure_rm_roleassignment_info: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + role_definition_id: /subscriptions/xxx-sub-guid-xxx/providers/Microsoft.Authorization/roleDefinitions/xxx-role-guid-xxxx + + - name: Get role assignments for specific scope with no inherited assignments + azure_rm_roleassignment_info: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + strict_scope_match: True + + - name: Get role assignments by name + azure_rm_roleassignment_info: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + name: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + + - name: Get role assignments by id + azure_rm_roleassignment_info: + id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +RETURN = ''' +roleassignments: + description: + - List of role assignments. + returned: always + type: complex + contains: + id: + description: + - Id of role assignment. 
+ type: str + returned: always + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + name: + description: + - Name of role assignment. + type: str + returned: always + sample: myRoleAssignment + type: + description: + - Type of role assignment. + type: str + returned: always + sample: custom + principal_id: + description: + - Principal Id of the role assigned to. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + principal_type: + description: + - Principal type of the role assigned to. + type: str + returned: always + sample: ServicePrincipal + role_definition_id: + description: + - Role definition id that was assigned to principal_id. + type: str + returned: always + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + scope: + description: + - The role assignment scope. + type: str + returned: always + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMRoleAssignmentInfo(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + assignee=dict(type='str', aliases=['assignee_object_id']), + id=dict(type='str'), + name=dict(type='str'), + role_definition_id=dict(type='str'), + scope=dict(type='str'), + strict_scope_match=dict(type='bool', default=False) + ) + + self.assignee = None + self.id = None + self.name = None + self.role_definition_id = None + self.scope = None + self.strict_scope_match = None + + self.results = dict( + changed=False, + roleassignments=[] + ) + + mutually_exclusive = [['name', 'assignee', 'id']] + + super(AzureRMRoleAssignmentInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True, + 
mutually_exclusive=mutually_exclusive) + + def exec_module(self, **kwargs): + """Main module execution method""" + is_old_facts = self.module._name == 'azure_rm_roleassignment_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_roleassignment_facts' module has been renamed to 'azure_rm_roleassignment_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.id: + self.results['roleassignments'] = self.get_by_id() + elif self.name and self.scope: + self.results['roleassignments'] = self.get_by_name() + elif self.name and not self.scope: + self.fail("Parameter Error: Name requires a scope to also be set.") + elif self.scope: + self.results['roleassignments'] = self.list_by_scope() + elif self.assignee: + self.results['roleassignments'] = self.list_by_assignee() + else: + self.results['roleassignments'] = self.list_assignments() + + return self.results + + def get_by_id(self): + ''' + Gets the role assignments by specific assignment id. + + :return: deserialized role assignment dictionary + ''' + self.log("Lists role assignment by id {0}".format(self.id)) + + results = [] + try: + response = [self.authorization_client.role_assignments.get_by_id(role_id=self.id)] + response = [self.roleassignment_to_dict(a) for a in response] + results = response + + except Exception as ex: + self.log("Didn't find role assignments id {0}".format(self.scope)) + + return results + + def get_by_name(self): + ''' + Gets the properties of the specified role assignment by name. + + :return: deserialized role assignment dictionary + ''' + self.log("Gets role assignment {0} by name".format(self.name)) + + results = [] + + try: + response = [self.authorization_client.role_assignments.get(scope=self.scope, role_assignment_name=self.name)] + response = [self.roleassignment_to_dict(a) for a in response] + + # If role_definition_id is set, we only want results matching that id. 
+ if self.role_definition_id: + response = [role_assignment for role_assignment in response if (role_assignment.get('role_definition_id').split('/')[-1].lower() + == self.role_definition_id.split('/')[-1].lower())] + + results = response + + except Exception as ex: + self.log("Didn't find role assignment {0} in scope {1}".format(self.name, self.scope)) + + return results + + def list_by_assignee(self): + ''' + Gets the role assignments by assignee. + + :return: deserialized role assignment dictionary + ''' + self.log("Gets role assignment {0} by name".format(self.name)) + + filter = "principalId eq '{0}'".format(self.assignee) + return self.list_assignments(filter=filter) + + def list_assignments(self, filter=None): + ''' + Returns a list of assignments. + ''' + results = [] + + try: + response = list(self.authorization_client.role_assignments.list(filter=filter)) + response = [self.roleassignment_to_dict(a) for a in response] + + # If role_definition_id is set, we only want results matching that id. + if self.role_definition_id: + response = [role_assignment for role_assignment in response if (role_assignment.get('role_definition_id').split('/')[-1].lower() + == self.role_definition_id.split('/')[-1].lower())] + + results = response + + except Exception as ex: + self.log("Didn't find role assignments in subscription {0}.".format(self.subscription_id)) + + return results + + def list_by_scope(self): + ''' + Lists the role assignments by specific scope. + + :return: deserialized role assignment dictionary + ''' + self.log("Lists role assignment by scope {0}".format(self.scope)) + + results = [] + try: + # atScope filter limits to exact scope plus parent scopes. Without it will return all children too. 
+ response = list(self.authorization_client.role_assignments.list_for_scope(scope=self.scope, filter='atScope()')) + + response = [self.roleassignment_to_dict(role_assignment) for role_assignment in response] + + # If assignee is set we only want results matching that assignee. + if self.assignee: + response = [role_assignment for role_assignment in response if role_assignment.get('principal_id').lower() == self.assignee.lower()] + + # If strict_scope_match is true we only want results matching exact scope. + if self.strict_scope_match: + response = [role_assignment for role_assignment in response if role_assignment.get('scope').lower() == self.scope.lower()] + + # If role_definition_id is set, we only want results matching that id. + if self.role_definition_id: + response = [role_assignment for role_assignment in response if (role_assignment.get('role_definition_id').split('/')[-1].lower() + == self.role_definition_id.split('/')[-1].lower())] + + results = response + + except Exception as ex: + self.log("Didn't find role assignments at scope {0}".format(self.scope)) + + return results + + def roleassignment_to_dict(self, assignment): + return dict( + assignee_object_id=assignment.principal_id, + id=assignment.id, + name=assignment.name, + principal_id=assignment.principal_id, + principal_type=assignment.principal_type, + role_definition_id=assignment.role_definition_id, + scope=assignment.scope, + type=assignment.type + ) + + +def main(): + """Main execution""" + AzureRMRoleAssignmentInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition.py new file mode 100644 index 000000000..d1cc4c2a5 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition.py @@ -0,0 +1,409 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, (@yungezz) +# +# GNU General Public 
License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_roledefinition +version_added: "0.1.2" +short_description: Manage Azure Role Definition +description: + - Create, update and delete instance of Azure Role Definition. + +options: + name: + description: + - Unique name of role definition. + required: True + type: str + permissions: + description: + - Set of role definition permissions. + - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info. + type: list + elements: dict + suboptions: + actions: + description: + - List of allowed actions. + type: list + elements: str + not_actions: + description: + - List of denied actions. + type: list + elements: str + data_actions: + description: + - List of allowed data actions. + type: list + elements: str + not_data_actions: + description: + - List of denied data actions. + type: list + elements: str + assignable_scopes: + description: + - List of assignable scopes of this definition. + elements: str + type: list + scope: + description: + - The scope of the role definition. + type: str + description: + description: + - The role definition description. + type: str + state: + description: + - Assert the state of the role definition. + - Use C(present) to create or update a role definition; use C(absent) to delete it. 
+ type: str + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu(@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a role definition + azure_rm_roledefinition: + name: myTestRole + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup + permissions: + - actions: + - "Microsoft.Compute/virtualMachines/read" + data_actions: + - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write" + assignable_scopes: + - "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +''' + +RETURN = ''' +id: + description: + - ID of current role definition. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/roleDefinitionId" +''' + +import uuid +from ansible.module_utils._text import to_native + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.polling import LROPoller + from msrest.serialization import Model + from azure.mgmt.authorization import AuthorizationManagementClient + from azure.mgmt.authorization.model import (RoleDefinition, Permission) + +except ImportError: + # This is handled in azure_rm_common + pass + + +permission_spec = dict( + actions=dict( + elements='str', + type='list' + ), + not_actions=dict( + type='list', + elements='str' + ), + data_actions=dict( + type='list', + elements='str' + ), + not_data_actions=dict( + type='list', + elements='str' + ), +) + + +def roledefinition_to_dict(role): + result = dict( + id=role.id, + name=role.name, + type=role.role_type, + assignable_scopes=role.assignable_scopes, + description=role.description, + role_name=role.role_name + ) + if role.permissions: + result['permissions'] = [dict( + actions=p.actions, + not_actions=p.not_actions, + 
data_actions=p.data_actions, + not_data_actions=p.not_data_actions + ) for p in role.permissions] + return result + + +class Actions: + NoAction, CreateOrUpdate, Delete = range(3) + + +class AzureRMRoleDefinition(AzureRMModuleBase): + """Configuration class for an Azure RM Role definition resource""" + + def __init__(self): + self.module_arg_spec = dict( + name=dict( + type='str', + required=True + ), + scope=dict( + type='str' + ), + permissions=dict( + type='list', + elements='dict', + options=permission_spec + ), + assignable_scopes=dict( + type='list', + elements='str' + ), + description=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + ) + + self.name = None + self.scope = None + self.permissions = None + self.description = None + self.assignable_scopes = None + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = Actions.NoAction + + self.role = None + + self._client = None + + super(AzureRMRoleDefinition, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + old_response = None + response = None + + # get management client + self._client = self.get_mgmt_svc_client(AuthorizationManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version="2018-01-01-preview") + + self.scope = self.build_scope() + + # get existing role definition + old_response = self.get_roledefinition() + + if old_response: + self.results['id'] = old_response['id'] + self.role = old_response + + if self.state == 'present': + # check if the role definition exists + if not old_response: + self.log("Role definition doesn't exist in this scope") + + self.to_do = Actions.CreateOrUpdate + + else: + # existing role 
definition, do update + self.log("Role definition already exists") + self.log('Result: {0}'.format(old_response)) + + # compare if role definition changed + if self.check_update(old_response): + self.to_do = Actions.CreateOrUpdate + + elif self.state == 'absent': + if old_response: + self.log("Delete role definition") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_roledefinition(old_response['name']) + + self.log('role definition deleted') + + else: + self.log("role definition {0} not exists.".format(self.name)) + + if self.to_do == Actions.CreateOrUpdate: + self.log('Need to Create/Update role definition') + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_update_roledefinition() + self.results['id'] = response['id'] + + return self.results + + # build scope + def build_scope(self): + subscription_scope = '/subscriptions/' + self.subscription_id + if self.scope is None: + return subscription_scope + return self.scope + + # check update + def check_update(self, old_definition): + if self.description and self.description != old_definition['description']: + return True + if self.permissions: + if len(self.permissions) != len(old_definition['permissions']): + return True + existing_permissions = self.permissions_to_set(old_definition['permissions']) + new_permissions = self.permissions_to_set(self.permissions) + if existing_permissions != new_permissions: + return True + if self.assignable_scopes and self.assignable_scopes != old_definition['assignable_scopes']: + return True + return False + + def permissions_to_set(self, permissions): + new_permissions = [str(dict( + actions=(set([to_native(a) for a in item.get('actions')]) if item.get('actions') else None), + not_actions=(set([to_native(a) for a in item.get('not_actions')]) if item.get('not_actions') else None), + data_actions=(set([to_native(a) for a in item.get('data_actions')]) if item.get('data_actions') else 
None), + not_data_actions=(set([to_native(a) for a in item.get('not_data_actions')]) if item.get('not_data_actions') else None), + )) for item in permissions] + return set(new_permissions) + + def create_update_roledefinition(self): + ''' + Creates or updates role definition. + + :return: deserialized role definition + ''' + self.log("Creating / Updating role definition {0}".format(self.name)) + + try: + permissions = None + if self.permissions: + permissions = [AuthorizationManagementClient.models("2018-01-01-preview").Permission( + actions=p.get('actions', None), + not_actions=p.get('not_actions', None), + data_actions=p.get('data_actions', None), + not_data_actions=p.get('not_data_actions', None) + ) for p in self.permissions] + role_definition = AuthorizationManagementClient.models("2018-01-01-preview").RoleDefinition( + role_name=self.name, + description=self.description, + permissions=permissions, + assignable_scopes=self.assignable_scopes, + role_type='CustomRole') + if self.role: + role_definition.name = self.role['name'] + response = self._client.role_definitions.create_or_update(role_definition_id=self.role['name'] if self.role else str(uuid.uuid4()), + scope=self.scope, + role_definition=role_definition) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create role definition.') + self.fail("Error creating role definition: {0}".format(str(exc))) + return roledefinition_to_dict(response) + + def delete_roledefinition(self, role_definition_id): + ''' + Deletes specified role definition. 
+ + :return: True + ''' + self.log("Deleting the role definition {0}".format(self.name)) + scope = self.build_scope() + try: + response = self._client.role_definitions.delete(scope=scope, + role_definition_id=role_definition_id) + if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller): + response = self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to delete the role definition.') + self.fail("Error deleting the role definition: {0}".format(str(e))) + + return True + + def get_roledefinition(self): + ''' + Gets the properties of the specified role definition. + + :return: deserialized role definition state dictionary + ''' + self.log("Checking if the role definition {0} is present".format(self.name)) + + response = None + + try: + response = list(self._client.role_definitions.list(scope=self.scope)) + + if len(response) > 0: + self.log("Response : {0}".format(response)) + roles = [] + for r in response: + if r.role_name == self.name: + roles.append(r) + + if len(roles) == 1: + self.log("role definition : {0} found".format(self.name)) + return roledefinition_to_dict(roles[0]) + if len(roles) > 1: + self.fail("Found multiple role definitions: {0}".format(roles)) + + except Exception as ex: + self.log("Didn't find role definition {0}".format(self.name)) + + return False + + +def main(): + """Main execution""" + AzureRMRoleDefinition() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition_info.py new file mode 100644 index 000000000..3899ce404 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_roledefinition_info.py @@ -0,0 +1,312 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yunge Zhu, (@yungezz) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from 
__future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_roledefinition_info +version_added: "0.1.2" +short_description: Get Azure Role Definition facts +description: + - Get facts of Azure Role Definition. + +options: + scope: + description: + - The scope of role definition. + required: True + type: str + id: + description: + - Role definition id. + type: str + role_name: + description: + - Role name. + type: str + type: + description: + - Type of role. + type: str + choices: + - system + - custom + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu(@yungezz) + +''' + +EXAMPLES = ''' + - name: List Role Definitions in scope + azure_rm_roledefinition_info: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup + + - name: Get Role Definition by name + azure_rm_roledefinition_info: + scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup + role_name: myRoleDefinition +''' + +RETURN = ''' +roledefinitions: + description: + - A list of Role Definition facts. + returned: always + type: complex + contains: + id: + description: + - Role Definition ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + role_name: + description: + - Role name. + returned: always + type: str + sample: myCustomRoleDefinition + name: + description: + - System assigned role name. + returned: always + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + assignable_scopes: + description: + - List of assignable scopes of this definition. + returned: always + type: list + sample: [ "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup" ] + permissions: + description: + - List of Role Definition permissions. 
+ returned: always + type: complex + contains: + actions: + description: + - List of allowed actions. + returned: always + type: list + sample: [ 'Microsoft.Compute/virtualMachines/read' ] + not_actions: + description: + - List of denied actions. + returned: always + type: list + sample: [ 'Microsoft.Compute/virtualMachines/write' ] + data_actions: + description: + - List of allowed data actions. + returned: always + type: list + sample: [ 'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read' ] + not_data_actions: + description: + - List of denied data actions. + returned: always + type: list + sample: [ 'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write' ] +''' + +from ansible.module_utils._text import to_native + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from msrest.serialization import Model + from azure.mgmt.authorization import AuthorizationManagementClient + +except ImportError: + # This is handled in azure_rm_common + pass + + +def roledefinition_to_dict(role): + result = dict( + id=role.id, + name=role.name, + type=role.role_type, + assignable_scopes=role.assignable_scopes, + description=role.description, + role_name=role.role_name + ) + if role.permissions: + result['permissions'] = [dict( + actions=p.actions, + not_actions=p.not_actions, + data_actions=p.data_actions, + not_data_actions=p.not_data_actions + ) for p in role.permissions] + return result + + +class AzureRMRoleDefinitionInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + scope=dict( + type='str', + required=True + ), + role_name=dict(type='str'), + id=dict(type='str'), + type=dict( + type='str', + choices=['custom', 'system'] + ), + ) + + self.role_name = None + self.scope = None + self.id = None + self.type = None + + self.results = dict( + changed=False + ) + + self._client = None + + super(AzureRMRoleDefinitionInfo, 
self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + is_old_facts = self.module._name == 'azure_rm_roledefinition_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_roledefinition_facts' module has been renamed to 'azure_rm_roledefinition_info'", version=(2.9, )) + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + if self.type: + self.type = self.get_role_type(self.type) + + # get management client + self._client = self.get_mgmt_svc_client(AuthorizationManagementClient, + base_url=self._cloud_environment.endpoints.resource_manager, + is_track2=True, + api_version="2018-01-01-preview") + + if self.id: + self.results['roledefinitions'] = self.get_by_id() + elif self.role_name: + self.results['roledefinitions'] = self.get_by_role_name() + else: + self.results['roledefinitions'] = self.list() + + return self.results + + def get_role_type(self, role_type): + if role_type: + if role_type == 'custom': + return 'CustomRole' + else: + return 'SystemRole' + return role_type + + def list(self): + ''' + List Role Definition in scope. + + :return: deserialized Role Definition state dictionary + ''' + self.log("List Role Definition in scope {0}".format(self.scope)) + + response = [] + + try: + response = list(self._client.role_definitions.list(scope=self.scope)) + + if len(response) > 0: + self.log("Response : {0}".format(response)) + roles = [] + + if self.type: + roles = [r for r in response if r.role_type == self.type] + else: + roles = response + + if len(roles) > 0: + return [roledefinition_to_dict(r) for r in roles] + + except Exception as ex: + self.log("Didn't find role definition in scope {0}".format(self.scope)) + + return response + + def get_by_id(self): + ''' + Get Role Definition in scope by id. 
+ + :return: deserialized Role Definition state dictionary + ''' + self.log("Get Role Definition by id {0}".format(self.id)) + + response = None + + try: + response = self._client.role_definitions.get(scope=self.scope, role_definition_id=self.id) + if response: + response = roledefinition_to_dict(response) + if self.type: + if response.role_type == self.type: + return [response] + else: + return [response] + + except Exception as ex: + self.log("Didn't find role definition by id {0}".format(self.id)) + + return [] + + def get_by_role_name(self): + ''' + Get Role Definition in scope by role name. + + :return: deserialized role definition state dictionary + ''' + self.log("Get Role Definition by name {0}".format(self.role_name)) + + response = [] + + try: + response = self.list() + + if len(response) > 0: + roles = [] + for r in response: + if r['role_name'] == self.role_name: + roles.append(r) + + if len(roles) == 1: + self.log("Role Definition : {0} found".format(self.role_name)) + return roles + if len(roles) > 1: + self.fail("Found multiple Role Definitions with name: {0}".format(self.role_name)) + + except Exception as ex: + self.log("Didn't find Role Definition by name {0}".format(self.role_name)) + + return [] + + +def main(): + """Main execution""" + AzureRMRoleDefinitionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_route.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_route.py new file mode 100644 index 000000000..dc1e79260 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_route.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_route +version_added: "0.1.2" 
+short_description: Manage Azure route resource +description: + - Create, update or delete a route. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the route. + required: true + state: + description: + - Assert the state of the route. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + address_prefix: + description: + - The destination CIDR to which the route applies. + next_hop_type: + description: + - The type of Azure hop the packet should be sent to. + choices: + - virtual_network_gateway + - vnet_local + - internet + - virtual_appliance + - none + default: 'none' + next_hop_ip_address: + description: + - The IP address packets should be forwarded to. + - Next hop values are only allowed in routes where the next hop type is VirtualAppliance. + route_table_name: + description: + - The name of the route table. + required: true + + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' + - name: Create a route + azure_rm_route: + resource_group: myResourceGroup + name: myRoute + address_prefix: 10.1.0.0/16 + next_hop_type: virtual_network_gateway + route_table_name: table + + - name: Delete a route + azure_rm_route: + resource_group: myResourceGroup + name: myRoute + route_table_name: table + state: absent +''' +RETURN = ''' +id: + description: + - Current state of the route. 
+ returned: success + type: str + sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57/routes/routeb57" +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _snake_to_camel + + +class AzureRMRoute(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + address_prefix=dict(type='str'), + next_hop_type=dict(type='str', + choices=['virtual_network_gateway', + 'vnet_local', + 'internet', + 'virtual_appliance', + 'none'], + default='none'), + next_hop_ip_address=dict(type='str'), + route_table_name=dict(type='str', required=True) + ) + + required_if = [ + ('state', 'present', ['next_hop_type']) + ] + + self.resource_group = None + self.name = None + self.state = None + self.address_prefix = None + self.next_hop_type = None + self.next_hop_ip_address = None + self.route_table_name = None + + self.results = dict( + changed=False, + id=None + ) + + super(AzureRMRoute, self).__init__(self.module_arg_spec, + required_if=required_if, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + result = dict() + changed = False + + self.next_hop_type = _snake_to_camel(self.next_hop_type, capitalize_first=True) + + result = self.get_route() + if self.state == 'absent' and result: + changed = True + if not self.check_mode: + self.delete_route() + elif self.state == 'present': + if not result: + changed = True # create new route + else: # check update + if result.next_hop_type != 
self.next_hop_type: + self.log('Update: {0} next_hop_type from {1} to {2}'.format(self.name, result.next_hop_type, self.next_hop_type)) + changed = True + if result.next_hop_ip_address != self.next_hop_ip_address: + self.log('Update: {0} next_hop_ip_address from {1} to {2}'.format(self.name, result.next_hop_ip_address, self.next_hop_ip_address)) + changed = True + if result.address_prefix != self.address_prefix: + self.log('Update: {0} address_prefix from {1} to {2}'.format(self.name, result.address_prefix, self.address_prefix)) + changed = True + if changed: + result = self.network_models.Route(name=self.name, + address_prefix=self.address_prefix, + next_hop_type=self.next_hop_type, + next_hop_ip_address=self.next_hop_ip_address) + if not self.check_mode: + result = self.create_or_update_route(result) + + self.results['id'] = result.id if result else None + self.results['changed'] = changed + return self.results + + def create_or_update_route(self, param): + try: + poller = self.network_client.routes.begin_create_or_update(self.resource_group, self.route_table_name, self.name, param) + return self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating route {0} - {1}".format(self.name, str(exc))) + + def delete_route(self): + try: + poller = self.network_client.routes.begin_delete(self.resource_group, self.route_table_name, self.name) + result = self.get_poller_result(poller) + return result + except Exception as exc: + self.fail("Error deleting route {0} - {1}".format(self.name, str(exc))) + + def get_route(self): + try: + return self.network_client.routes.get(self.resource_group, self.route_table_name, self.name) + except ResourceNotFoundError as cloud_err: + # Return None iff the resource is not found + if cloud_err.status_code == 404: + self.log('{0}'.format(str(cloud_err))) + return None + self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err))) + except Exception as exc: + self.fail('Error: 
failed to get resource {0} - {1}'.format(self.name, str(exc))) + + +def main(): + AzureRMRoute() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_route_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_route_info.py new file mode 100644 index 000000000..ee5311c21 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_route_info.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 GuopengLin, (@t-glin) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_route_info +version_added: '2.0.0' +short_description: Get Route info +description: + - Get info of Route. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + route_table_name: + description: + - The name of the route table. + required: true + type: str + name: + description: + - The name of the route. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - GuopengLin (@t-glin) + - Fred-Sun (@Fred-Sun) + - Haiyuan Zhang (@haiyuazhang) + +''' + +EXAMPLES = ''' + - name: Get route + azure_rm_route_info: + resource_group: myResourceGroup + name: routename + route_table_name: routetabename + + + - name: List routes + azure_rm_route_info: + resource_group: myResourceGroup + route_table_name: routetabename + +''' + +RETURN = ''' +routes: + description: + - A list of dict results where the key is the name of the Route and the values are the facts for that Route. + returned: always + type: complex + contains: + id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/routeTables/table01/routes/route01 + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + returned: always + type: str + sample: route01 + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: fda240c5-6f42-4eb0-8f05-b1599dc722fe + address_prefix: + description: + - The destination CIDR to which the route applies. + returned: always + type: str + sample: 10.0.0.0/25 + next_hop_type: + description: + - The type of Azure hop the packet should be sent to. + returned: always + type: str + sample: VirtualAppliance + next_hop_ip_address: + description: + - The IP address packets should be forwarded to. + - Next hop values are only allowed in routes where the next hop type is VirtualAppliance. + returned: always + type: str + sample: 10.0.0.0 + provisioning_state: + description: + - The provisioning state of the route resource. 
+ returned: always + type: str + sample: Succeeded + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.network import NetworkManagementClient + from msrestazure.azure_operation import AzureOperationPoller + from msrest.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMRouteInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + route_table_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + + self.resource_group = None + self.route_table_name = None + self.name = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200] + self.tags = None + + self.mgmt_client = None + super(AzureRMRouteInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and self.route_table_name is not None and self.name is not None): + self.results['routes'] = self.format_item(self.get()) + elif (self.resource_group is not None and + self.route_table_name is not None): + self.results['routes'] = self.format_item(self.list()) + return self.results + + def get(self): + response = None + + try: + response = self.network_client.routes.get(resource_group_name=self.resource_group, + route_table_name=self.route_table_name, + route_name=self.name) + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list(self): + response = None + + try: + response = 
self.network_client.routes.list(resource_group_name=self.resource_group, + route_table_name=self.route_table_name) + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def format_item(self, item): + if hasattr(item, 'as_dict'): + return [item.as_dict()] + else: + result = [] + items = list(item) + for tmp in items: + result.append(tmp.as_dict()) + return result + + +def main(): + AzureRMRouteInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable.py new file mode 100644 index 000000000..7558907e9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_routetable +version_added: "0.1.2" +short_description: Manage Azure route table resource +description: + - Create, update or delete a route table. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the route table. + required: true + state: + description: + - Assert the state of the route table. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + disable_bgp_route_propagation: + description: + - Specified whether to disable the routes learned by BGP on that route table. + type: bool + default: False + location: + description: + - Region of the resource. + - Derived from I(resource_group) if not specified. 
+
+extends_documentation_fragment:
+    - azure.azcollection.azure
+    - azure.azcollection.azure_tags
+
+author:
+    - Yuwei Zhou (@yuwzho)
+
+'''
+
+EXAMPLES = '''
+    - name: Create a route table
+      azure_rm_routetable:
+        resource_group: myResourceGroup
+        name: myRouteTable
+        disable_bgp_route_propagation: False
+        tags:
+          purpose: testing
+
+    - name: Delete a route table
+      azure_rm_routetable:
+        resource_group: myResourceGroup
+        name: myRouteTable
+        state: absent
+'''
+RETURN = '''
+changed:
+    description:
+        - Whether the resource is changed.
+    returned: always
+    type: bool
+    sample: true
+id:
+    description:
+        - Resource ID.
+    returned: success
+    type: str
+    sample: "/subscriptions/xxx...xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57dc95642/routes/routeb57dc95986"
+'''
+
+try:
+    from azure.core.exceptions import ResourceNotFoundError
+except ImportError:
+    # This is handled in azure_rm_common
+    pass
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
+
+
+class AzureRMRouteTable(AzureRMModuleBase):
+
+    def __init__(self):
+
+        self.module_arg_spec = dict(
+            resource_group=dict(type='str', required=True),
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            location=dict(type='str'),
+            disable_bgp_route_propagation=dict(type='bool', default=False)
+        )
+
+        self.resource_group = None
+        self.name = None
+        self.state = None
+        self.location = None
+        self.tags = None
+        self.disable_bgp_route_propagation = None
+
+        self.results = dict(
+            changed=False
+        )
+
+        super(AzureRMRouteTable, self).__init__(self.module_arg_spec,
+                                                supports_check_mode=True)
+
+    def exec_module(self, **kwargs):
+        for key in list(self.module_arg_spec.keys()) + ['tags']:
+            setattr(self, key, kwargs[key])
+
+        resource_group = self.get_resource_group(self.resource_group)
+        if not self.location:
+            # Set default location
+            self.location = resource_group.location
+        self.location = normalize_location_name(self.location)
+
+        result = dict()
+        changed = False
+
+        result = self.get_table()
+        if self.state == 'absent' and result:
+            changed = True
+            if not self.check_mode:
+                self.delete_table()
+        elif self.state == 'present':
+            routes = []
+            subnets = None
+            if not result:
+                changed = True  # create new route table
+            else:  # check update
+                routes = result.routes
+                subnets = result.subnets
+                update_tags, self.tags = self.update_tags(result.tags)
+                if update_tags:
+                    changed = True
+                if self.disable_bgp_route_propagation != result.disable_bgp_route_propagation:
+                    changed = True
+
+            if changed:
+                result = self.network_models.RouteTable(location=self.location,
+                                                        tags=self.tags,
+                                                        routes=routes,
+                                                        subnets=subnets,
+                                                        disable_bgp_route_propagation=self.disable_bgp_route_propagation)
+                if not self.check_mode:
+                    result = self.create_or_update_table(result)
+
+        self.results['id'] = result.id if result else None
+        self.results['changed'] = changed
+        return self.results
+
+    def create_or_update_table(self, param):
+        try:
+            poller = self.network_client.route_tables.begin_create_or_update(self.resource_group, self.name, param)
+            return self.get_poller_result(poller)
+        except Exception as exc:
+            self.fail("Error creating or updating route table {0} - {1}".format(self.name, str(exc)))
+
+    def delete_table(self):
+        try:
+            poller = self.network_client.route_tables.begin_delete(self.resource_group, self.name)
+            result = self.get_poller_result(poller)
+            return result
+        except Exception as exc:
+            self.fail("Error deleting route table {0} - {1}".format(self.name, str(exc)))
+
+    def get_table(self):
+        try:
+            return self.network_client.route_tables.get(self.resource_group, self.name)
+        except ResourceNotFoundError as cloud_err:
+            # Return None iff the resource is not found
+            if cloud_err.status_code == 404:
+                self.log('{0}'.format(str(cloud_err)))
+                return None
+            self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err)))
+        except Exception as exc:
+            self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(exc)))
+
+
+def main():
+    AzureRMRouteTable()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable_info.py
new file mode 100644
index 000000000..901319ab8
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_routetable_info.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yuwei Zhou,
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_routetable_info
+
+version_added: "0.1.2"
+
+short_description: Get route table facts
+
+description:
+    - Get facts for a specific route table or all route tables in a resource group or subscription.
+
+options:
+    name:
+        description:
+            - Limit results to a specific route table.
+    resource_group:
+        description:
+            - Limit results in a specific resource group.
+    tags:
+        description:
+            - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+        type: list
+        elements: str
+
+extends_documentation_fragment:
+    - azure.azcollection.azure
+
+author:
+    - Yuwei Zhou (@yuwzho)
+
+'''
+
+EXAMPLES = '''
+    - name: Get facts for one route table
+      azure_rm_routetable_info:
+        name: Testing
+        resource_group: myResourceGroup
+
+    - name: Get facts for all route tables
+      azure_rm_routetable_info:
+        resource_group: myResourceGroup
+
+    - name: Get facts by tags
+      azure_rm_routetable_info:
+        tags:
+          - testing
+          - foo:bar
+'''
+RETURN = '''
+id:
+    description:
+        - Resource ID.
+    returned: success
+    type: str
+    sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57dc95236"
+name:
+    description:
+        - Name of the resource.
+    returned: success
+    type: str
+    sample: tableb57dc95236
+resource_group:
+    description:
+        - Resource group of the route table.
+    returned: success
+    type: str
+    sample: v-xisuRG
+disable_bgp_route_propagation:
+    description:
+        - Whether the routes learned by BGP on that route table are disabled.
+    returned: success
+    type: bool
+    sample: false
+tags:
+    description:
+        - Tags of the route table.
+    returned: success
+    type: dict
+    sample: { 'key1':'value1', 'key2':'value2'}
+routes:
+    description:
+        - Current routes of the route table.
+    returned: success
+    type: list
+    sample: [
+        {
+            "id": "/subscriptions/xxx...xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57dc95236/routes/routeb57dc95540",
+            "name": "routeb57dc95540",
+            "resource_group": "v-xisuRG",
+            "route_table_name": "tableb57dc95236",
+            "address_prefix": "10.1.0.0/24",
+            "next_hop_type": "virtual_network_gateway",
+            "next_hop_ip_address": null
+        }
+    ]
+'''
+
+try:
+    from azure.core.exceptions import ResourceNotFoundError
+except Exception:
+    # This is handled in azure_rm_common
+    pass
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
+from ansible.module_utils.common.dict_transformations import _camel_to_snake
+
+
+def route_to_dict(route):
+    id_dict = azure_id_to_dict(route.id)
+    return dict(
+        id=route.id,
+        name=route.name,
+        resource_group=id_dict.get('resourceGroups'),
+        route_table_name=id_dict.get('routeTables'),
+        address_prefix=route.address_prefix,
+        next_hop_type=_camel_to_snake(route.next_hop_type),
+        next_hop_ip_address=route.next_hop_ip_address
+    )
+
+
+def instance_to_dict(table):
+    return dict(
+        id=table.id,
+        name=table.name,
+        resource_group=azure_id_to_dict(table.id).get('resourceGroups'),
+        location=table.location,
+        routes=[route_to_dict(i) for i in table.routes] if table.routes else [],
+        disable_bgp_route_propagation=table.disable_bgp_route_propagation,
+        tags=table.tags
+    )
+
+
+class AzureRMRouteTableInfo(AzureRMModuleBase):
+
+    def __init__(self):
+
+        self.module_arg_spec = dict(
+            name=dict(type='str'),
+            resource_group=dict(type='str'),
+            tags=dict(type='list', elements='str')
+        )
+
+        self.results = dict(
+            changed=False,
+            route_tables=[]
+        )
+
+        self.name = None
+        self.resource_group = None
+        self.tags = None
+
+        super(AzureRMRouteTableInfo, self).__init__(self.module_arg_spec,
+                                                    supports_check_mode=True,
+                                                    supports_tags=False,
+                                                    facts_module=True)
+
+    def exec_module(self, **kwargs):
+        is_old_facts = self.module._name == 'azure_rm_routetable_facts'
+        if is_old_facts:
+            self.module.deprecate("The 'azure_rm_routetable_facts' module has been renamed to 'azure_rm_routetable_info'", version=(2.9, ))
+
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+
+        response = []
+        if self.name:
+            response = self.get_item()
+        elif self.resource_group:
+            response = self.list_items()
+        else:
+            response = self.list_all_items()
+
+        self.results['route_tables'] = [instance_to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
+        return self.results
+
+    def get_item(self):
+        self.log('Get route table for {0}-{1}'.format(self.resource_group, self.name))
+        try:
+            item = self.network_client.route_tables.get(self.resource_group, self.name)
+            return [item]
+        except ResourceNotFoundError:
+            pass
+        return []
+
+    def list_items(self):
+        self.log('List all items in resource group')
+        try:
+            return self.network_client.route_tables.list(self.resource_group)
+        except ResourceNotFoundError as exc:
+            self.fail("Failed to list items - {0}".format(str(exc)))
+        return []
+
+    def list_all_items(self):
+        self.log("List all items in subscription")
+        try:
+            return self.network_client.route_tables.list_all()
+        except ResourceNotFoundError as exc:
+            self.fail("Failed to list all items - {0}".format(str(exc)))
+        return []
+
+
+def main():
+    AzureRMRouteTableInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup.py
new file mode 100644
index 000000000..28a9b8391
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup.py
@@ -0,0 +1,834 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Matt Davis,
+# Chris Houseknecht,
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_securitygroup
+version_added: "0.1.0"
+short_description: Manage Azure network security groups
+description:
+    - Create, update or delete a network security group.
+    - A security group contains Access Control List (ACL) rules that allow or deny network traffic to subnets or individual network interfaces.
+    - A security group is created with a set of default security rules and an empty set of security rules.
+    - Shape traffic flow by adding rules to the empty set of security rules.
+
+options:
+    default_rules:
+        description:
+            - The set of default rules automatically added to a security group at creation.
+            - In general default rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC.
+            - See rules below for the makeup of a rule dict.
+    location:
+        description:
+            - Valid azure location. Defaults to location of the resource group.
+    name:
+        description:
+            - Name of the security group to operate on.
+    purge_default_rules:
+        description:
+            - Remove any existing rules not matching those defined in the default_rules parameter.
+ type: bool + default: 'no' + purge_rules: + description: + - Remove any existing rules not matching those defined in the rules parameters. + type: bool + default: 'no' + resource_group: + description: + - Name of the resource group the security group belongs to. + required: true + rules: + description: + - Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary. + suboptions: + name: + description: + - Unique name for the rule. + required: true + description: + description: + - Short description of the rule's purpose. + protocol: + description: + - Accepted traffic protocol. + choices: + - Udp + - Tcp + - Icmp + - "*" + default: "*" + source_port_range: + description: + - Port or range of ports from which traffic originates. + - It can accept string type or a list of string type. + default: "*" + destination_port_range: + description: + - Port or range of ports to which traffic is headed. + - It can accept string type or a list of string type. + default: "*" + source_address_prefix: + description: + - The CIDR or source IP range. + - Asterisk C(*) can also be used to match all source IPs. + - Default tags such as C(VirtualNetwork), C(AzureLoadBalancer) and C(Internet) can also be used. + - If this is an ingress rule, specifies where network traffic originates from. + - It can accept string type or a list of string type. + - Asterisk C(*) and default tags can only be specified as single string type, not as a list of string. + default: "*" + destination_address_prefix: + description: + - The destination address prefix. + - CIDR or destination IP range. + - Asterisk C(*) can also be used to match all source IPs. + - Default tags such as C(VirtualNetwork), C(AzureLoadBalancer) and C(Internet) can also be used. + - It can accept string type or a list of string type. + - Asterisk C(*) and default tags can only be specified as single string type, not as a list of string. 
+ default: "*" + source_application_security_groups: + description: + - List of the source application security groups. + - It could be list of resource id. + - It could be list of names in same resource group. + - It could be list of dict containing resource_group and name. + - It is mutually exclusive with C(source_address_prefix) and C(source_address_prefixes). + type: list + elements: raw + destination_application_security_groups: + description: + - List of the destination application security groups. + - It could be list of resource id. + - It could be list of names in same resource group. + - It could be list of dict containing I(resource_group) and I(name). + - It is mutually exclusive with C(destination_address_prefix) and C(destination_address_prefixes). + type: list + elements: raw + access: + description: + - Whether or not to allow the traffic flow. + choices: + - Allow + - Deny + default: Allow + priority: + description: + - Order in which to apply the rule. Must a unique integer between 100 and 4096 inclusive. + required: true + direction: + description: + - Indicates the direction of the traffic flow. + choices: + - Inbound + - Outbound + default: Inbound + state: + description: + - Assert the state of the security group. Set to C(present) to create or update a security group. Set to C(absent) to remove a security group. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + +# Create a security group +- azure_rm_securitygroup: + resource_group: myResourceGroup + name: mysecgroup + purge_rules: yes + rules: + - name: DenySSH + protocol: Tcp + destination_port_range: 22 + access: Deny + priority: 100 + direction: Inbound + - name: 'AllowSSH' + protocol: Tcp + source_address_prefix: + - '174.109.158.0/24' + - '174.109.159.0/24' + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + - name: 'AllowMultiplePorts' + protocol: Tcp + source_address_prefix: + - '174.109.158.0/24' + - '174.109.159.0/24' + destination_port_range: + - 80 + - 443 + access: Allow + priority: 102 + +# Update rules on existing security group +- azure_rm_securitygroup: + resource_group: myResourceGroup + name: mysecgroup + rules: + - name: DenySSH + protocol: Tcp + destination_port_range: 22-23 + access: Deny + priority: 100 + direction: Inbound + - name: AllowSSHFromHome + protocol: Tcp + source_address_prefix: '174.109.158.0/24' + destination_port_range: 22-23 + access: Allow + priority: 102 + direction: Inbound + tags: + testing: testing + delete: on-exit + +# Create a securiy group with I(protocol=Icmp) +- azure_rm_securitygroup: + name: mysecgroup + resource_group: myResourceGroup + rules: + - name: SSH + protocol: Tcp + destination_port_range: 22 + access: Allow + priority: 105 + direction: Inbound + - name: ICMP + protocol: Icmp + priority: 106 + +# Delete security group +- azure_rm_securitygroup: + resource_group: myResourceGroup + name: mysecgroup + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the security group. + returned: always + type: complex + contains: + default_rules: + description: + - The default security rules of network security group. 
+ returned: always + type: list + sample: [ + { + "access": "Allow", + "description": "Allow inbound traffic from all VMs in VNET", + "destination_address_prefix": "VirtualNetwork", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound", + "name": "AllowVnetInBound", + "priority": 65000, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "VirtualNetwork", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow inbound traffic from azure load balancer", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound", + "name": "AllowAzureLoadBalancerInBound", + "priority": 65001, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "AzureLoadBalancer", + "source_port_range": "*" + }, + { + "access": "Deny", + "description": "Deny all inbound traffic", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound", + "name": "DenyAllInBound", + "priority": 65500, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to all VMs in VNET", + 
"destination_address_prefix": "VirtualNetwork", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound", + "name": "AllowVnetOutBound", + "priority": 65000, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "VirtualNetwork", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to Internet", + "destination_address_prefix": "Internet", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound", + "name": "AllowInternetOutBound", + "priority": 65001, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Deny", + "description": "Deny all outbound traffic", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound", + "name": "DenyAllOutBound", + "priority": 65500, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + } + ] + id: + description: + - The resource ID. 
+            returned: always
+            type: str
+            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup"
+        location:
+            description:
+                - The resource location.
+            returned: always
+            type: str
+            sample: "westus"
+        name:
+            description:
+                - Name of the security group.
+            returned: always
+            type: str
+            sample: "mysecgroup"
+        network_interfaces:
+            description:
+                - A collection of references to network interfaces.
+            returned: always
+            type: list
+            sample: []
+        rules:
+            description:
+                - A collection of security rules of the network security group.
+            returned: always
+            type: list
+            sample: [
+                {
+                    "access": "Deny",
+                    "description": null,
+                    "destination_address_prefix": "*",
+                    "destination_port_range": "22",
+                    "direction": "Inbound",
+                    "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
+                    "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
+                    "name": "DenySSH",
+                    "priority": 100,
+                    "protocol": "Tcp",
+                    "provisioning_state": "Succeeded",
+                    "source_address_prefix": "*",
+                    "source_port_range": "*"
+                },
+                {
+                    "access": "Allow",
+                    "description": null,
+                    "destination_address_prefix": "*",
+                    "destination_port_range": "22",
+                    "direction": "Inbound",
+                    "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
+                    "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
+                    "name": "AllowSSH",
+                    "priority": 101,
+                    "protocol": "Tcp",
+                    "provisioning_state": "Succeeded",
+                    "source_address_prefix": "174.109.158.0/24",
+                    "source_port_range": "*"
+                }
+            ]
+        subnets:
+            description:
+                - A collection of references to subnets.
+            returned: always
+            type: list
+            sample: []
+        tags:
+            description:
+                - Tags to assign to the security group.
+ returned: always + type: dict + sample: { + "delete": "on-exit", + "foo": "bar", + "testing": "testing" + } + type: + description: + - The resource type. + returned: always + type: str + sample: "Microsoft.Network/networkSecurityGroups" +''' # NOQA + +try: + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.tools import is_valid_resource_id + from azure.mgmt.network import NetworkManagementClient +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.six import integer_types +from ansible.module_utils._text import to_native + + +def validate_rule(self, rule, rule_type=None): + ''' + Apply defaults to a rule dictionary and check that all values are valid. + + :param rule: rule dict + :param rule_type: Set to 'default' if the rule is part of the default set of rules. + :return: None + ''' + priority = rule.get('priority', 0) + if rule_type != 'default' and (priority < 100 or priority > 4096): + raise Exception("Rule priority must be between 100 and 4096") + + def check_plural(src, dest): + if isinstance(rule.get(src), list): + rule[dest] = rule[src] + rule[src] = None + + check_plural('destination_address_prefix', 'destination_address_prefixes') + check_plural('source_address_prefix', 'source_address_prefixes') + check_plural('source_port_range', 'source_port_ranges') + check_plural('destination_port_range', 'destination_port_ranges') + + # when source(destination)_application_security_groups set, remove the default value * of source(destination)_address_prefix + if rule.get('source_application_security_groups') and rule.get('source_address_prefix') == '*': + rule['source_address_prefix'] = None + if rule.get('destination_application_security_groups') and rule.get('destination_address_prefix') == '*': + rule['destination_address_prefix'] = None + + +def compare_rules_change(old_list, new_list, 
purge_list): + old_list = old_list or [] + new_list = new_list or [] + changed = False + + for old_rule in old_list: + matched = next((x for x in new_list if x['name'].lower() == old_rule['name'].lower()), []) + if matched: # if the new one is in the old list, check whether it is updated + changed = changed or compare_rules(old_rule, matched) + elif not purge_list: # keep this rule + new_list.append(old_rule) + else: # one rule is removed + changed = True + # Compare new list and old list is the same? here only compare names + if not changed: + new_names = [to_native(x['name'].lower()) for x in new_list] + old_names = [to_native(x['name'].lower()) for x in old_list] + changed = (set(new_names) != set(old_names)) + return changed, new_list + + +def compare_rules(old_rule, rule): + def compare_list_rule(old_rule, rule, key): + return set(map(str, rule.get(key) or [])) != set(map(str, old_rule.get(key) or [])) + changed = False + if old_rule['name'].lower() != rule['name'].lower(): + changed = True + if rule.get('description', None) != old_rule['description']: + changed = True + if rule['protocol'].lower() != old_rule['protocol'].lower(): + changed = True + if str(rule['source_port_range']) != str(old_rule['source_port_range']): + changed = True + if str(rule['destination_port_range']) != str(old_rule['destination_port_range']): + changed = True + if rule['access'] != old_rule['access']: + changed = True + if rule['priority'] != old_rule['priority']: + changed = True + if rule['direction'] != old_rule['direction']: + changed = True + if str(rule['source_address_prefix']) != str(old_rule['source_address_prefix']): + changed = True + if str(rule['destination_address_prefix']) != str(old_rule['destination_address_prefix']): + changed = True + if compare_list_rule(old_rule, rule, 'source_address_prefixes'): + changed = True + if compare_list_rule(old_rule, rule, 'destination_address_prefixes'): + changed = True + if compare_list_rule(old_rule, rule, 'source_port_ranges'): 
+ changed = True + if compare_list_rule(old_rule, rule, 'destination_port_ranges'): + changed = True + if compare_list_rule(old_rule, rule, 'source_application_security_groups'): + changed = True + if compare_list_rule(old_rule, rule, 'destination_application_security_groups'): + changed = True + return changed + + +def create_rule_instance(self, rule): + ''' + Create an instance of SecurityRule from a dict. + + :param rule: dict + :return: SecurityRule + ''' + return self.nsg_models.SecurityRule( + description=rule.get('description', None), + protocol=rule.get('protocol', None), + source_port_range=rule.get('source_port_range', None), + destination_port_range=rule.get('destination_port_range', None), + source_address_prefix=rule.get('source_address_prefix', None), + source_address_prefixes=rule.get('source_address_prefixes', None), + destination_address_prefix=rule.get('destination_address_prefix', None), + destination_address_prefixes=rule.get('destination_address_prefixes', None), + source_port_ranges=rule.get('source_port_ranges', None), + destination_port_ranges=rule.get('destination_port_ranges', None), + source_application_security_groups=[ + self.nsg_models.ApplicationSecurityGroup(id=p) + for p in rule.get('source_application_security_groups')] if rule.get('source_application_security_groups') else None, + destination_application_security_groups=[ + self.nsg_models.ApplicationSecurityGroup(id=p) + for p in rule.get('destination_application_security_groups')] if rule.get('destination_application_security_groups') else None, + access=rule.get('access', None), + priority=rule.get('priority', None), + direction=rule.get('direction', None), + provisioning_state=rule.get('provisioning_state', None), + name=rule.get('name', None), + etag=rule.get('etag', None) + ) + + +def create_rule_dict_from_obj(rule): + ''' + Create a dict from an instance of a SecurityRule. 
+ + :param rule: SecurityRule + :return: dict + ''' + return dict( + id=rule.id, + name=rule.name, + description=rule.description, + protocol=rule.protocol, + source_port_range=rule.source_port_range, + destination_port_range=rule.destination_port_range, + source_address_prefix=rule.source_address_prefix, + destination_address_prefix=rule.destination_address_prefix, + source_port_ranges=rule.source_port_ranges, + destination_port_ranges=rule.destination_port_ranges, + source_address_prefixes=rule.source_address_prefixes, + destination_address_prefixes=rule.destination_address_prefixes, + source_application_security_groups=[p.id for p in rule.source_application_security_groups] if rule.source_application_security_groups else None, + destination_application_security_groups=[ + p.id for p in rule.destination_application_security_groups] if rule.destination_application_security_groups else None, + access=rule.access, + priority=rule.priority, + direction=rule.direction, + provisioning_state=rule.provisioning_state, + etag=rule.etag + ) + + +def create_network_security_group_dict(nsg): + results = dict( + id=nsg.id, + name=nsg.name, + type=nsg.type, + location=nsg.location, + tags=nsg.tags, + ) + results['rules'] = [] + if nsg.security_rules: + for rule in nsg.security_rules: + results['rules'].append(create_rule_dict_from_obj(rule)) + + results['default_rules'] = [] + if nsg.default_security_rules: + for rule in nsg.default_security_rules: + results['default_rules'].append(create_rule_dict_from_obj(rule)) + + results['network_interfaces'] = [] + if nsg.network_interfaces: + for interface in nsg.network_interfaces: + results['network_interfaces'].append(interface.id) + + results['subnets'] = [] + if nsg.subnets: + for subnet in nsg.subnets: + results['subnets'].append(subnet.id) + + return results + + +rule_spec = dict( + name=dict(type='str', required=True), + description=dict(type='str'), + protocol=dict(type='str', choices=['Udp', 'Tcp', 'Icmp', '*'], default='*'), + 
source_port_range=dict(type='raw', default='*'), + destination_port_range=dict(type='raw', default='*'), + source_address_prefix=dict(type='raw', default='*'), + destination_address_prefix=dict(type='raw', default='*'), + source_application_security_groups=dict(type='list', elements='raw'), + destination_application_security_groups=dict(type='list', elements='raw'), + access=dict(type='str', choices=['Allow', 'Deny'], default='Allow'), + priority=dict(type='int', required=True), + direction=dict(type='str', choices=['Inbound', 'Outbound'], default='Inbound') +) + + +class AzureRMSecurityGroup(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + default_rules=dict(type='list', elements='dict', options=rule_spec), + location=dict(type='str'), + name=dict(type='str', required=True), + purge_default_rules=dict(type='bool', default=False), + purge_rules=dict(type='bool', default=False), + resource_group=dict(required=True, type='str'), + rules=dict(type='list', elements='dict', options=rule_spec), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + self.default_rules = None + self.location = None + self.name = None + self.purge_default_rules = None + self.purge_rules = None + self.resource_group = None + self.rules = None + self.state = None + self.tags = None + self.nsg_models = None # type: azure.mgmt.network.models + + self.results = dict( + changed=False, + state=dict() + ) + + mutually_exclusive = [["source_application_security_group", "source_address_prefix"], + ["source_application_security_group", "source_address_prefixes"], + ["destination_application_security_group", "destination_address_prefix"], + ["destination_application_security_group", "destination_address_prefixes"]] + + super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive) + + def exec_module(self, **kwargs): + # tighten up poll interval for security groups; default 
30s is an eternity + # this value is still overridden by the response Retry-After header (which is set on the initial operation response to 10s) + # self.network_client.config.long_running_operation_timeout = 3 + self.nsg_models = self.network_client.network_security_groups.models + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + results = dict() + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if self.rules: + for rule in self.rules: + try: + validate_rule(self, rule) + except Exception as exc: + self.fail("Error validating rule {0} - {1}".format(rule, str(exc))) + self.convert_asg_to_id(rule) + + if self.default_rules: + for rule in self.default_rules: + try: + validate_rule(self, rule, 'default') + except Exception as exc: + self.fail("Error validating default rule {0} - {1}".format(rule, str(exc))) + self.convert_asg_to_id(rule) + + try: + nsg = self.network_client.network_security_groups.get(self.resource_group, self.name) + results = create_network_security_group_dict(nsg) + self.log("Found security group:") + self.log(results, pretty_print=True) + self.check_provisioning_state(nsg, self.state) + if self.state == 'present': + pass + elif self.state == 'absent': + self.log("CHANGED: security group found but state is 'absent'") + changed = True + except ResourceNotFoundError: # TODO: actually check for ResourceMissingError + if self.state == 'present': + self.log("CHANGED: security group not found and state is 'present'") + changed = True + + if self.state == 'present' and not changed: + # update the security group + self.log("Update security group {0}".format(self.name)) + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + rule_changed, new_rule = compare_rules_change(results['rules'], self.rules, self.purge_rules) + if 
rule_changed: + changed = True + results['rules'] = new_rule + rule_changed, new_rule = compare_rules_change(results['default_rules'], self.default_rules, self.purge_default_rules) + if rule_changed: + changed = True + results['default_rules'] = new_rule + + self.results['changed'] = changed + self.results['state'] = results + if not self.check_mode and changed: + self.results['state'] = self.create_or_update(results) + + elif self.state == 'present' and changed: + # create the security group + self.log("Create security group {0}".format(self.name)) + + if not self.location: + self.fail("Parameter error: location required when creating a security group.") + + results['name'] = self.name + results['location'] = self.location + results['rules'] = [] + results['default_rules'] = [] + results['tags'] = {} + + if self.rules: + results['rules'] = self.rules + if self.default_rules: + results['default_rules'] = self.default_rules + if self.tags: + results['tags'] = self.tags + + self.results['changed'] = changed + self.results['state'] = results + if not self.check_mode: + self.results['state'] = self.create_or_update(results) + + elif self.state == 'absent' and changed: + self.log("Delete security group {0}".format(self.name)) + self.results['changed'] = changed + self.results['state'] = dict() + if not self.check_mode: + self.delete() + # the delete does not actually return anything. if no exception, then we'll assume + # it worked. 
+ self.results['state']['status'] = 'Deleted' + + return self.results + + def create_or_update(self, results): + parameters = self.nsg_models.NetworkSecurityGroup() + if results.get('rules'): + parameters.security_rules = [] + for rule in results.get('rules'): + parameters.security_rules.append(create_rule_instance(self, rule)) + if results.get('default_rules'): + parameters.default_security_rules = [] + for rule in results.get('default_rules'): + parameters.default_security_rules.append(create_rule_instance(self, rule)) + parameters.tags = results.get('tags') + parameters.location = results.get('location') + + try: + poller = self.network_client.network_security_groups.begin_create_or_update(resource_group_name=self.resource_group, + network_security_group_name=self.name, + parameters=parameters) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating/updating security group {0} - {1}".format(self.name, str(exc))) + return create_network_security_group_dict(result) + + def delete(self): + try: + poller = self.network_client.network_security_groups.begin_delete(resource_group_name=self.resource_group, network_security_group_name=self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting security group {0} - {1}".format(self.name, str(exc))) + + return result + + def convert_asg_to_id(self, rule): + def convert_to_id(rule, key): + if rule.get(key): + ids = [] + for p in rule.get(key): + if isinstance(p, dict): + ids.append("/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationSecurityGroups/{2}".format( + self.subscription_id, p.get('resource_group'), p.get('name'))) + elif isinstance(p, str): + if is_valid_resource_id(p): + ids.append(p) + else: + ids.append("/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationSecurityGroups/{2}".format( + self.subscription_id, self.resource_group, p)) + rule[key] = ids + convert_to_id(rule, 
'source_application_security_groups') + convert_to_id(rule, 'destination_application_security_groups') + + +def main(): + AzureRMSecurityGroup() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup_info.py new file mode 100644 index 000000000..2a549c631 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_securitygroup_info.py @@ -0,0 +1,395 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_securitygroup_info + +version_added: "0.1.2" + +short_description: Get security group facts + +description: + - Get facts for a specific security group or all security groups within a resource group. + +options: + name: + description: + - Only show results for a specific security group. + resource_group: + description: + - Name of the resource group to use. + required: true + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Get facts for one security group + azure_rm_securitygroup_info: + resource_group: myResourceGroup + name: secgroup001 + + - name: Get facts for all security groups + azure_rm_securitygroup_info: + resource_group: myResourceGroup + +''' + +RETURN = ''' +securitygroups: + description: + - List containing security group dicts. 
+ returned: always + type: complex + contains: + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"' + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001" + location: + description: + - Resource location. + returned: always + type: str + sample: "eastus2" + name: + description: + - Resource name. + returned: always + type: str + sample: "secgroup001" + default_rules: + description: + - The default security rules of network security group. + returned: always + type: list + sample: [ + { + "access": "Allow", + "description": "Allow inbound traffic from all VMs in VNET", + "destination_address_prefix": "VirtualNetwork", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound", + "name": "AllowVnetInBound", + "priority": 65000, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "VirtualNetwork", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow inbound traffic from azure load balancer", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound", + "name": "AllowAzureLoadBalancerInBound", + "priority": 65001, + "protocol": "*", + "provisioning_state": "Succeeded", + 
"source_address_prefix": "AzureLoadBalancer", + "source_port_range": "*" + }, + { + "access": "Deny", + "description": "Deny all inbound traffic", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound", + "name": "DenyAllInBound", + "priority": 65500, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to all VMs in VNET", + "destination_address_prefix": "VirtualNetwork", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound", + "name": "AllowVnetOutBound", + "priority": 65000, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "VirtualNetwork", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to Internet", + "destination_address_prefix": "Internet", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound", + "name": "AllowInternetOutBound", + "priority": 65001, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Deny", + "description": "Deny all outbound traffic", + 
"destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound", + "name": "DenyAllOutBound", + "priority": 65500, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + } + ] + network_interfaces: + description: + - A collection of references to network interfaces. + returned: always + type: list + sample: [] + rules: + description: + - A collection of security rules of the network security group. + returned: always + type: list + sample: [ + { + "access": "Deny", + "description": null, + "destination_address_prefix": "*", + "destination_port_range": "22", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH", + "name": "DenySSH", + "priority": 100, + "protocol": "Tcp", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": null, + "destination_address_prefix": "*", + "destination_port_range": "22", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH", + "name": "AllowSSH", + "priority": 101, + "protocol": "Tcp", + "provisioning_state": "Succeeded", + "source_address_prefix": "174.109.158.0/24", + "source_port_range": "*" + } + ] + subnets: + description: + - A collection of references to subnets. 
+ returned: always + type: list + sample: [] + tags: + description: + - Tags to assign to the security group. + returned: always + type: dict + sample: { 'tag': 'value' } + type: + description: + - Type of the resource. + returned: always + type: str + sample: "Microsoft.Network/networkSecurityGroups" + +''' # NOQA + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +def create_rule_dict_from_obj(rule): + return dict( + id=rule.id, + name=rule.name, + description=rule.description, + protocol=rule.protocol, + source_port_range=rule.source_port_range, + destination_port_range=rule.destination_port_range, + source_address_prefix=rule.source_address_prefix, + destination_address_prefix=rule.destination_address_prefix, + source_port_ranges=rule.source_port_ranges, + destination_port_ranges=rule.destination_port_ranges, + source_address_prefixes=rule.source_address_prefixes, + destination_address_prefixes=rule.destination_address_prefixes, + source_application_security_groups=[p.id for p in rule.source_application_security_groups] if rule.source_application_security_groups else None, + destination_application_security_groups=[ + p.id for p in rule.destination_application_security_groups] if rule.destination_application_security_groups else None, + access=rule.access, + priority=rule.priority, + direction=rule.direction, + provisioning_state=rule.provisioning_state, + etag=rule.etag + ) + + +def create_network_security_group_dict(nsg): + result = dict( + etag=nsg.etag, + id=nsg.id, + location=nsg.location, + name=nsg.name, + tags=nsg.tags, + type=nsg.type, + ) + result['rules'] = [] + if nsg.security_rules: + for rule in nsg.security_rules: + result['rules'].append(create_rule_dict_from_obj(rule)) + + result['default_rules'] = [] + if nsg.default_security_rules: + for rule in 
nsg.default_security_rules: + result['default_rules'].append(create_rule_dict_from_obj(rule)) + + result['network_interfaces'] = [] + if nsg.network_interfaces: + for interface in nsg.network_interfaces: + result['network_interfaces'].append(interface.id) + + result['subnets'] = [] + if nsg.subnets: + for subnet in nsg.subnets: + result['subnets'].append(subnet.id) + + return result + + +class AzureRMSecurityGroupInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(required=True, type='str'), + tags=dict(type='list', elements='str'), + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMSecurityGroupInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + is_old_facts = self.module._name == 'azure_rm_securitygroup_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_securitygroup_facts' module has been renamed to 'azure_rm_securitygroup_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + info = self.get_item() + else: + info = self.list_items() + + if is_old_facts: + self.results['ansible_facts'] = { + 'azure_securitygroups': info + } + self.results['securitygroups'] = info + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.network_client.network_security_groups.get(self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [create_network_security_group_dict(item)] + + return result + + def list_items(self): + self.log('List all items') + try: + response = self.network_client.network_security_groups.list(self.resource_group) + except Exception as 
exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(create_network_security_group_dict(item)) + return results + + +def main(): + AzureRMSecurityGroupInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus.py new file mode 100644 index 000000000..07db88849 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus.py @@ -0,0 +1,219 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_servicebus +version_added: "0.1.2" +short_description: Manage Azure Service Bus +description: + - Create, update or delete an Azure Service Bus namespace. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the servicebus namespace. + required: true + state: + description: + - Assert the state of the servicebus. Use C(present) to create or update and use C(absent) to delete. + default: present + choices: + - absent + - present + location: + description: + - The servicebus's location. + sku: + description: + - Namespace SKU. + choices: + - standard + - basic + - premium + default: standard + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a namespace + azure_rm_servicebus: + name: deadbeef + location: eastus + tags: + key1: value1 +''' +RETURN = ''' +id: + description: + - Current state of the service bus. 
+ returned: success + type: str + sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/namespaces/myServicebus" +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +from ansible.module_utils._text import to_native +from datetime import datetime, timedelta + + +class AzureRMServiceBus(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + state=dict(type='str', default='present', choices=['present', 'absent']), + sku=dict(type='str', choices=['basic', 'standard', 'premium'], default='standard') + ) + + self.resource_group = None + self.name = None + self.state = None + self.sku = None + self.location = None + + self.results = dict( + changed=False, + id=None + ) + + super(AzureRMServiceBus, self).__init__(self.module_arg_spec, + supports_tags=True, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + + if not self.location: + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + + original = self.get() + + if not original: + self.check_name() + + if self.state == 'present': + if not self.check_mode: + if original: + update_tags, new_tags = self.update_tags(original.tags) + if update_tags: + changed = True + self.tags = new_tags + original = self.create() + else: + changed = False + else: + changed = True + original = self.create() + else: + changed = True + elif self.state == 'absent' and original: + changed = True + original = None + if not self.check_mode: + self.delete() + 
self.results['deleted'] = True + + if original: + self.results = self.to_dict(original) + self.results['changed'] = changed + return self.results + + def check_name(self): + try: + check_name = self.servicebus_client.namespaces.check_name_availability(parameters={'name': self.name}) + if not check_name or not check_name.name_available: + self.fail("Error creating namespace {0} - {1}".format(self.name, check_name.message or str(check_name))) + except Exception as exc: + self.fail("Error creating namespace {0} - {1}".format(self.name, exc.message or str(exc))) + + def create(self): + self.log('Cannot find namespace, creating a one') + try: + sku = self.servicebus_models.SBSku(name=str.capitalize(self.sku)) + poller = self.servicebus_client.namespaces.begin_create_or_update(self.resource_group, + self.name, + self.servicebus_models.SBNamespace(location=self.location, + tags=self.tags, + sku=sku)) + ns = self.get_poller_result(poller) + except Exception as exc: + self.fail('Error creating namespace {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc))) + return ns + + def delete(self): + try: + self.servicebus_client.namespaces.begin_delete(self.resource_group, self.name) + return True + except Exception as exc: + self.fail("Error deleting route {0} - {1}".format(self.name, str(exc))) + + def get(self): + try: + return self.servicebus_client.namespaces.get(self.resource_group, self.name) + except Exception: + return None + + def to_dict(self, instance): + result = dict() + attribute_map = self.servicebus_models.SBNamespace._attribute_map + for attribute in attribute_map.keys(): + value = getattr(instance, attribute) + if not value: + continue + if isinstance(value, self.servicebus_models.SBSku): + result[attribute] = value.name.lower() + elif isinstance(value, datetime): + result[attribute] = str(value) + elif isinstance(value, str): + result[attribute] = to_native(value) + elif attribute == 'max_size_in_megabytes': + result['max_size_in_mb'] = value + 
else: + result[attribute] = value + return result + + +def is_valid_timedelta(value): + if value == timedelta(10675199, 10085, 477581): + return None + return value + + +def main(): + AzureRMServiceBus() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus_info.py new file mode 100644 index 000000000..246bb514b --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebus_info.py @@ -0,0 +1,582 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_servicebus_info + +version_added: "0.1.2" + +short_description: Get servicebus facts + +description: + - Get facts for a specific servicebus or all servicebus in a resource group or subscription. + +options: + name: + description: + - Limit results to a specific servicebus. + resource_group: + description: + - Limit results in a specific resource group. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + namespace: + description: + - Servicebus namespace name. + - A namespace is a scoping container for all messaging components. + - Multiple queues and topics can reside within a single namespace, and namespaces often serve as application containers. + - Required when I(type=namespace). + type: + description: + - Type of the resource. + choices: + - namespace + - queue + - topic + - subscription + topic: + description: + - Topic name. + - Required when I(type=subscription). + show_sas_policies: + description: + - Whether to show the SAS policies. + - Not support when I(type=subscription). 
+ - Note that enabling this option causes the facts module to make two extra HTTP calls for each resource, adding network overhead. + type: bool +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Get all namespaces under a resource group + azure_rm_servicebus_info: + resource_group: myResourceGroup + type: namespace + +- name: Get all topics under a namespace + azure_rm_servicebus_info: + resource_group: myResourceGroup + namespace: bar + type: topic + +- name: Get a single queue with SAS policies + azure_rm_servicebus_info: + resource_group: myResourceGroup + namespace: bar + type: queue + name: sbqueue + show_sas_policies: true + +- name: Get all subscriptions under a resource group + azure_rm_servicebus_info: + resource_group: myResourceGroup + type: subscription + namespace: bar + topic: sbtopic +''' +RETURN = ''' +servicebuses: + description: + - List of servicebus dicts. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/Microsoft.ServiceBus/ + namespaces/bar/topics/baz/subscriptions/qux" + name: + description: + - Resource name. + returned: always + type: str + sample: qux + location: + description: + - The Geo-location where the resource lives. + returned: always + type: str + sample: eastus + namespace: + description: + - I(namespace) name of the C(queue) or C(topic), C(subscription). + returned: always + type: str + sample: bar + topic: + description: + - Topic name of a subscription. + returned: always + type: str + sample: baz + tags: + description: + - Resource tags. + returned: always + type: dict + sample: {env: sandbox} + sku: + description: + - Properties of namespace's SKU. + returned: always + type: str + sample: Standard + provisioning_state: + description: + - Provisioning state of the namespace. 
+ returned: always + type: str + sample: Succeeded + service_bus_endpoint: + description: + - Endpoint you can use to perform Service Bus operations. + returned: always + type: str + sample: "https://bar.servicebus.windows.net:443/" + metric_id: + description: + - Identifier for Azure Insights metrics of namespace. + returned: always + type: str + sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX:bar" + type: + description: + - Resource type. + - Namespace is a scoping container for all messaging components. + - Queue enables you to store messages until the receiving application is available to receive and process them. + - Topic and subscriptions enable 1:n relationships between publishers and subscribers. + returned: always + type: str + sample: "Microsoft.ServiceBus/Namespaces/Topics" + created_at: + description: + - Exact time the message was created. + returned: always + type: str + sample: "2019-01-25 02:46:55.543953+00:00" + updated_at: + description: + - The exact time the message was updated. + returned: always + type: str + sample: "2019-01-25 02:46:55.543953+00:00" + accessed_at: + description: + - Last time the message was sent, or a request was received for this topic. + returned: always + type: str + sample: "2019-01-25 02:46:55.543953+00:00" + subscription_count: + description: + - Number of subscriptions under a topic. + returned: always + type: int + sample: 1 + count_details: + description: + - Message count details. + returned: always + type: complex + contains: + active_message_count: + description: + - Number of active messages in the C(queue), C(topic), or C(subscription). + returned: always + type: int + sample: 0 + dead_letter_message_count: + description: + - Number of messages that are dead lettered. + returned: always + type: int + sample: 0 + scheduled_message_count: + description: + - Number of scheduled messages. 
+ returned: always + type: int + sample: 0 + transfer_message_count: + description: + - Number of messages transferred to another C(queue), C(topic), or C(subscription). + returned: always + type: int + sample: 0 + transfer_dead_letter_message_count: + description: + - Number of messages transferred into dead letters. + returned: always + type: int + sample: 0 + support_ordering: + description: + - Value that indicates whether the C(topic) supports ordering. + returned: always + type: bool + sample: true + status: + description: + - The status of a messaging entity. + returned: always + type: str + sample: active + requires_session: + description: + - A value that indicates whether the C(queue) or C(topic) supports the concept of sessions. + returned: always + type: bool + sample: true + requires_duplicate_detection: + description: + - A value indicating if this C(queue) or C(topic) requires duplicate detection. + returned: always + type: bool + sample: true + max_size_in_mb: + description: + - Maximum size of the C(queue) or C(topic) in megabytes, which is the size of the memory allocated for the C(topic). + returned: always + type: int + sample: 5120 + max_delivery_count: + description: + - The maximum delivery count. + - A message is automatically deadlettered after this number of deliveries. + returned: always + type: int + sample: 10 + lock_duration_in_seconds: + description: + - ISO 8601 timespan duration of a peek-lock. + - The amount of time that the message is locked for other receivers. + - The maximum value for LockDuration is 5 minutes. + returned: always + type: int + sample: 60 + forward_to: + description: + - C(queue) or C(topic) name to forward the messages. + returned: always + type: str + sample: quux + forward_dead_lettered_messages_to: + description: + - C(queue) or C(topic) name to forward the Dead Letter message. 
+ returned: always + type: str + sample: corge + enable_partitioning: + description: + - Value that indicates whether the C(queue) or C(topic) to be partitioned across multiple message brokers is enabled. + returned: always + type: bool + sample: true + enable_express: + description: + - Value that indicates whether Express Entities are enabled. + - An express topic holds a message in memory temporarily before writing it to persistent storage. + returned: always + type: bool + sample: true + enable_batched_operations: + description: + - Value that indicates whether server-side batched operations are enabled. + returned: always + type: bool + sample: true + duplicate_detection_time_in_seconds: + description: + - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. + returned: always + type: int + sample: 600 + default_message_time_to_live_seconds: + description: + - ISO 8061 Default message timespan to live value. + - This is the duration after which the message expires, starting from when the message is sent to Service Bus. + - This is the default value used when TimeToLive is not set on a message itself. + returned: always + type: int + sample: 0 + dead_lettering_on_message_expiration: + description: + - A value that indicates whether this C(queue) or C(topic) has dead letter support when a message expires. + returned: always + type: int + sample: 0 + dead_lettering_on_filter_evaluation_exceptions: + description: + - Value that indicates whether a subscription has dead letter support on filter evaluation exceptions. + returned: always + type: int + sample: 0 + auto_delete_on_idle_in_seconds: + description: + - ISO 8061 timeSpan idle interval after which the queue or topic is automatically deleted. + - The minimum duration is 5 minutes. + returned: always + type: int + sample: true + size_in_bytes: + description: + - The size of the C(queue) or C(topic) in bytes. 
+ returned: always + type: int + sample: 0 + message_count: + description: + - Number of messages. + returned: always + type: int + sample: 10 + sas_policies: + description: + - Dict of SAS policies. + - Will not be returned until I(show_sas_policy) set. + returned: always + type: dict + sample: { + "testpolicy1": { + "id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/ + foo/providers/Microsoft.ServiceBus/namespaces/bar/queues/qux/authorizationRules/testpolicy1", + "keys": { + "key_name": "testpolicy1", + "primary_connection_string": "Endpoint=sb://bar.servicebus.windows.net/; + SharedAccessKeyName=testpolicy1;SharedAccessKey=XXXXXXXXXXXXXXXXX;EntityPath=qux", + "primary_key": "XXXXXXXXXXXXXXXXX", + "secondary_connection_string": "Endpoint=sb://bar.servicebus.windows.net/; + SharedAccessKeyName=testpolicy1;SharedAccessKey=XXXXXXXXXXXXXXX;EntityPath=qux", + "secondary_key": "XXXXXXXXXXXXXXX" + }, + "name": "testpolicy1", + "rights": "listen_send", + "type": "Microsoft.ServiceBus/Namespaces/Queues/AuthorizationRules" + } + } +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict + from msrestazure.azure_exceptions import CloudError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _camel_to_snake +from ansible.module_utils._text import to_native +from datetime import datetime, timedelta + +duration_spec_map = dict( + default_message_time_to_live='default_message_time_to_live_seconds', + duplicate_detection_history_time_window='duplicate_detection_time_in_seconds', + auto_delete_on_idle='auto_delete_on_idle_in_seconds', + lock_duration='lock_duration_in_seconds' +) + + +def is_valid_timedelta(value): + if value == timedelta(10675199, 10085, 477581): + return None + return value + + +class AzureRMServiceBusInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + 
name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str'), + type=dict(type='str', required=True, choices=['namespace', 'topic', 'queue', 'subscription']), + namespace=dict(type='str'), + topic=dict(type='str'), + show_sas_policies=dict(type='bool') + ) + + required_if = [ + ('type', 'subscription', ['topic', 'resource_group', 'namespace']), + ('type', 'topic', ['resource_group', 'namespace']), + ('type', 'queue', ['resource_group', 'namespace']) + ] + + self.results = dict( + changed=False, + servicebuses=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + self.type = None + self.namespace = None + self.topic = None + self.show_sas_policies = None + + super(AzureRMServiceBusInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + required_if=required_if, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_servicebus_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_servicebus_facts' module has been renamed to 'azure_rm_servicebus_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + response = [] + if self.name: + response = self.get_item() + elif self.resource_group: + response = self.list_items() + else: + response = self.list_all_items() + + self.results['servicebuses'] = [self.instance_to_dict(x) for x in response] + return self.results + + def instance_to_dict(self, instance): + result = dict() + instance_type = getattr(self.servicebus_models, 'SB{0}'.format(str.capitalize(self.type))) + attribute_map = instance_type._attribute_map + for attribute in attribute_map.keys(): + value = getattr(instance, attribute) + if attribute_map[attribute]['type'] == 'duration': + if is_valid_timedelta(value): + key = duration_spec_map.get(attribute) or attribute + result[key] = int(value.total_seconds()) + elif attribute == 'status': + result['status'] = 
_camel_to_snake(value) + elif isinstance(value, self.servicebus_models.MessageCountDetails): + result[attribute] = value.as_dict() + elif isinstance(value, self.servicebus_models.SBSku): + result[attribute] = value.name.lower() + elif isinstance(value, datetime): + result[attribute] = str(value) + elif isinstance(value, str): + result[attribute] = to_native(value) + elif attribute == 'max_size_in_megabytes': + result['max_size_in_mb'] = value + else: + result[attribute] = value + if self.show_sas_policies and self.type != 'subscription': + policies = self.get_auth_rules() + for name in policies.keys(): + policies[name]['keys'] = self.get_sas_key(name) + result['sas_policies'] = policies + if self.namespace: + result['namespace'] = self.namespace + if self.topic: + result['topic'] = self.topic + return result + + def _get_client(self): + return getattr(self.servicebus_client, '{0}s'.format(self.type)) + + def get_item(self): + try: + client = self._get_client() + if self.type == 'namespace': + item = client.get(self.resource_group, self.name) + return [item] if self.has_tags(item.tags, self.tags) else [] + elif self.type == 'subscription': + return [client.get(self.resource_group, self.namespace, self.topic, self.name)] + else: + return [client.get(self.resource_group, self.namespace, self.name)] + except Exception: + pass + return [] + + def list_items(self): + try: + client = self._get_client() + if self.type == 'namespace': + response = client.list_by_resource_group(self.resource_group) + return [x for x in response if self.has_tags(x.tags, self.tags)] + elif self.type == 'subscription': + return client.list_by_topic(self.resource_group, self.namespace, self.topic) + else: + return client.list_by_namespace(self.resource_group, self.namespace) + except Exception as exc: + self.fail("Failed to list items - {0}".format(str(exc))) + return [] + + def list_all_items(self): + self.log("List all items in subscription") + try: + if self.type != 'namespace': + return [] + 
response = self.servicebus_client.namespaces.list() + return [x for x in response if self.has_tags(x.tags, self.tags)] + except Exception as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + return [] + + def get_auth_rules(self): + result = dict() + try: + client = self._get_client() + if self.type == 'namespace': + rules = client.list_authorization_rules(self.resource_group, self.name) + else: + rules = client.list_authorization_rules(self.resource_group, self.namespace, self.name) + while True: + rule = rules.next() + result[rule.name] = self.policy_to_dict(rule) + except StopIteration: + pass + except Exception as exc: + self.fail('Error when getting SAS policies for {0} {1}: {2}'.format(self.type, self.name, exc.message or str(exc))) + return result + + def get_sas_key(self, name): + try: + client = self._get_client() + if self.type == 'namespace': + return client.list_keys(self.resource_group, self.name, name).as_dict() + else: + return client.list_keys(self.resource_group, self.namespace, self.name, name).as_dict() + except Exception as exc: + self.fail('Error when getting SAS policy {0}\'s key - {1}'.format(name, exc.message or str(exc))) + return None + + def policy_to_dict(self, rule): + result = rule.as_dict() + rights = result['rights'] + if 'Manage' in rights: + result['rights'] = 'manage' + elif 'Listen' in rights and 'Send' in rights: + result['rights'] = 'listen_send' + else: + result['rights'] = rights[0].lower() + return result + + +def main(): + AzureRMServiceBusInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebusqueue.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebusqueue.py new file mode 100644 index 000000000..edb810374 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebusqueue.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General 
Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_servicebusqueue +version_added: "0.1.2" +short_description: Manage Azure Service Bus queue +description: + - Create, update or delete an Azure Service Bus queue. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the queue. + required: true + namespace: + description: + - Servicebus namespace name. + - A namespace is a scoping container for all messaging components. + - Multiple queues and topics can reside within a single namespace, and namespaces often serve as application containers. + required: true + state: + description: + - Assert the state of the queue. Use C(present) to create or update and use C(absent) to delete. + default: present + choices: + - absent + - present + auto_delete_on_idle_in_seconds: + description: + - Time idle interval after which a queue is automatically deleted. + - The minimum duration is 5 minutes. + type: int + dead_lettering_on_message_expiration: + description: + - A value that indicates whether a queue has dead letter support when a message expires. + type: bool + default_message_time_to_live_seconds: + description: + - Default message timespan to live value. + - This is the duration after which the message expires, starting from when the message is sent to Service Bus. + - This is the default value used when TimeToLive is not set on a message itself. + type: int + enable_batched_operations: + description: + - Value that indicates whether server-side batched operations are enabled. + type: bool + enable_express: + description: + - Value that indicates whether Express Entities are enabled. + - An express topic or queue holds a message in memory temporarily before writing it to persistent storage. 
+ type: bool + enable_partitioning: + description: + - A value that indicates whether the topic or queue is to be partitioned across multiple message brokers. + type: bool + forward_dead_lettered_messages_to: + description: + - Queue or topic name to forward the Dead Letter message for a queue. + forward_to: + description: + - Queue or topic name to forward the messages for a queue. + lock_duration_in_seconds: + description: + - Timespan duration of a peek-lock. + - The amount of time that the message is locked for other receivers. + - The maximum value for LockDuration is 5 minutes. + type: int + max_delivery_count: + description: + - The maximum delivery count. + - A message is automatically deadlettered after this number of deliveries. + type: int + max_message_size_in_kb: + description: + - Maximum size (in KB) of the message payload that can be accepted by the queue. + - This property is only used in Premium today and default is 1024. + type: int + max_size_in_mb: + description: + - The maximum size of the queue in megabytes, which is the size of memory allocated for the queue. + type: int + requires_duplicate_detection: + description: + - A value indicating if this queue or topic requires duplicate detection. + type: bool + duplicate_detection_time_in_seconds: + description: + - TimeSpan structure that defines the duration of the duplicate detection history. + type: int + requires_session: + description: + - A value that indicates whether the queue supports the concept of sessions. + type: bool + status: + description: + - Status of the entity. 
+ choices: + - active + - disabled + - send_disabled + - receive_disabled +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a queue + azure_rm_servicebusqueue: + name: subqueue + resource_group: myResourceGroup + namespace: bar + duplicate_detection_time_in_seconds: 600 +''' +RETURN = ''' +id: + description: + - Current state of the queue. + returned: success + type: str + sample: "/subscriptions/xxx...xxx/resourceGroups/v-xisuRG/providers/Microsoft.ServiceBus/namespaces/nsb57dc9561/queues/queueb57dc9561" +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +from ansible.module_utils._text import to_native +from datetime import datetime, timedelta + + +duration_spec_map = dict( + default_message_time_to_live='default_message_time_to_live_seconds', + duplicate_detection_history_time_window='duplicate_detection_time_in_seconds', + auto_delete_on_idle='auto_delete_on_idle_in_seconds', + lock_duration='lock_duration_in_seconds' +) + + +sas_policy_spec = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + regenerate_key=dict(type='bool'), + rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send']) +) + + +class AzureRMServiceBusQueue(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + namespace=dict(type='str', required=True), + auto_delete_on_idle_in_seconds=dict(type='int'), + dead_lettering_on_message_expiration=dict(type='bool'), + 
default_message_time_to_live_seconds=dict(type='int'), + duplicate_detection_time_in_seconds=dict(type='int'), + enable_batched_operations=dict(type='bool'), + enable_express=dict(type='bool'), + enable_partitioning=dict(type='bool'), + forward_dead_lettered_messages_to=dict(type='str'), + forward_to=dict(type='str'), + lock_duration_in_seconds=dict(type='int'), + max_delivery_count=dict(type='int'), + max_message_size_in_kb=dict(type='int'), + max_size_in_mb=dict(type='int'), + requires_duplicate_detection=dict(type='bool'), + requires_session=dict(type='bool'), + status=dict(type='str', + choices=['active', 'disabled', 'send_disabled', 'receive_disabled']) + ) + + self.resource_group = None + self.name = None + self.state = None + self.namespace = None + self.location = None + self.type = None + self.subscription_topic_name = None + self.auto_delete_on_idle_in_seconds = None + self.dead_lettering_on_message_expiration = None + self.default_message_time_to_live_seconds = None + self.enable_batched_operations = None + self.enable_express = None + self.enable_partitioning = None + self.forward_dead_lettered_messages_to = None + self.forward_to = None + self.lock_duration_in_seconds = None + self.max_delivery_count = None + self.max_size_in_mb = None + self.requires_duplicate_detection = None + self.status = None + self.requires_session = None + self.max_message_size_in_kb = None + + self.results = dict( + changed=False, + id=None + ) + + super(AzureRMServiceBusQueue, self).__init__(self.module_arg_spec, + supports_tags=False, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + changed = False + + original = self.get() + if self.state == 'present': + # Create the resource instance + params = dict( + dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, + enable_batched_operations=self.enable_batched_operations, + 
enable_express=self.enable_express, + enable_partitioning=self.enable_partitioning, + forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, + forward_to=self.forward_to, + max_delivery_count=self.max_delivery_count, + max_message_size_in_kilobytes=self.max_message_size_in_kb, + max_size_in_megabytes=self.max_size_in_mb, + requires_session=self.requires_session, + requires_duplicate_detection=self.requires_duplicate_detection + ) + if self.status: + params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status))) + for k, v in duration_spec_map.items(): + seconds = getattr(self, v) + if seconds: + params[k] = timedelta(seconds=seconds) + + instance = self.servicebus_models.SBQueue(**params) + result = original + if not original: + changed = True + result = instance + else: + result = original + attribute_map = set(self.servicebus_models.SBQueue._attribute_map.keys()) - set(self.servicebus_models.SBQueue._validation.keys()) + for attribute in attribute_map: + value = getattr(instance, attribute) + if value and value != getattr(original, attribute): + changed = True + if changed and not self.check_mode: + result = self.create_or_update(instance) + self.results = self.to_dict(result) + elif original: + changed = True + if not self.check_mode: + self.delete() + self.results['deleted'] = True + + self.results['changed'] = changed + return self.results + + def create_or_update(self, param): + + try: + client = self._get_client() + return client.create_or_update(self.resource_group, self.namespace, self.name, param) + except Exception as exc: + self.fail('Error creating or updating queue {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc))) + + def delete(self): + try: + client = self._get_client() + client.delete(self.resource_group, self.namespace, self.name) + return True + except Exception as exc: + self.fail("Error deleting queue {0} - {1}".format(self.name, str(exc))) + + def _get_client(self): + 
return self.servicebus_client.queues + + def get(self): + try: + client = self._get_client() + return client.get(self.resource_group, self.namespace, self.name) + except Exception: + return None + + def to_dict(self, instance): + result = dict() + attribute_map = self.servicebus_models.SBQueue._attribute_map + for attribute in attribute_map.keys(): + value = getattr(instance, attribute) + if not value: + continue + if attribute_map[attribute]['type'] == 'duration': + if is_valid_timedelta(value): + key = duration_spec_map.get(attribute) or attribute + result[key] = int(value.total_seconds()) + elif attribute == 'status': + result['status'] = _camel_to_snake(value) + elif isinstance(value, self.servicebus_models.MessageCountDetails): + result[attribute] = value.as_dict() + elif isinstance(value, self.servicebus_models.SBSku): + result[attribute] = value.name.lower() + elif isinstance(value, datetime): + result[attribute] = str(value) + elif isinstance(value, str): + result[attribute] = to_native(value) + elif attribute == 'max_size_in_megabytes': + result['max_size_in_mb'] = value + elif attribute == 'max_size_in_kilobytes': + result['max_size_in_kb'] = value + else: + result[attribute] = value + return result + + +def is_valid_timedelta(value): + if value == timedelta(10675199, 10085, 477581): + return None + return value + + +def main(): + AzureRMServiceBusQueue() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebussaspolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebussaspolicy.py new file mode 100644 index 000000000..3acb2f479 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebussaspolicy.py @@ -0,0 +1,325 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, 
print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_servicebussaspolicy +version_added: "0.1.2" +short_description: Manage Azure Service Bus SAS policy +description: + - Create, update or delete an Azure Service Bus SAS policy. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the SAS policy. + required: true + state: + description: + - Assert the state of the SAS policy. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + namespace: + description: + - Manage SAS policy for a namespace without C(queue) or C(topic) set. + - Manage SAS policy for a queue or topic under this namespace. + required: true + queue: + description: + - Name of the messaging queue. + - Cannot set C(topic) when this field is set. + topic: + description: + - Name of the messaging topic. + - Cannot set C(queue) when this field is set. + regenerate_primary_key: + description: + - Regenerate the SAS policy primary key. + type: bool + default: False + regenerate_secondary_key: + description: + - Regenerate the SAS policy secondary key. + type: bool + default: False + rights: + description: + - Claim rights of the SAS policy. + required: True + choices: + - manage + - listen + - send + - listen_send + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a SAS policy + azure_rm_servicebussaspolicy: + name: deadbeef + queue: qux + namespace: bar + resource_group: myResourceGroup + rights: send +''' +RETURN = ''' +id: + description: + - Current state of the SAS policy. + returned: success + type: str + sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/ + namespaces/nsb57dc95979/topics/topicb57dc95979/authorizationRules/testpolicy" +keys: + description: + - Key dict of the SAS policy. 
+ returned: success + type: complex + contains: + key_name: + description: + - Name of the SAS policy. + returned: success + type: str + sample: testpolicy + primary_connection_string: + description: + - Primary connection string. + returned: success + type: str + sample: "Endpoint=sb://nsb57dc95979.servicebus.windows.net/;SharedAccessKeyName=testpolicy; + SharedAccessKey=xxxxxxxxxxxxxxxxxxxxxxxxxxxx" + primary_key: + description: + - Primary key. + returned: success + type: str + sample: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + secondary_key: + description: + - Secondary key. + returned: success + type: str + sample: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + secondary_connection_string: + description: + - Secondary connection string. + returned: success + type: str + sample: "Endpoint=sb://nsb57dc95979.servicebus.windows.net/;SharedAccessKeyName=testpolicy; + SharedAccessKey=xxxxxxxxxxxxxxxxxxxxxxxxx" +name: + description: + - Name of the SAS policy. + returned: success + type: str + sample: testpolicy +rights: + description: + - Privilege of the SAS policy. + returned: success + type: str + sample: manage +type: + description: + - Type of the SAS policy. 
+ returned: Successed + type: str + sample: "Microsoft.ServiceBus/Namespaces/Topics/AuthorizationRules" +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +from ansible.module_utils._text import to_native +from datetime import datetime, timedelta + + +class AzureRMServiceBusSASPolicy(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + namespace=dict(type='str', required=True), + queue=dict(type='str'), + topic=dict(type='str'), + regenerate_primary_key=dict(type='bool', default=False), + regenerate_secondary_key=dict(type='bool', default=False), + rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send']) + ) + + mutually_exclusive = [ + ['queue', 'topic'] + ] + + required_if = [('state', 'present', ['rights'])] + + self.resource_group = None + self.name = None + self.state = None + self.namespace = None + self.queue = None + self.topic = None + self.regenerate_primary_key = False + self.regenerate_secondary_key = False + self.rights = None + + self.results = dict( + changed=False, + id=None + ) + + super(AzureRMServiceBusSASPolicy, self).__init__(self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_tags=False, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + changed = False + + policy = self.get_auth_rule() + if self.state == 'present': + if not policy: # Create a new one + changed = True + if not self.check_mode: + policy = self.create_sas_policy() + else: + changed = changed | 
self.regenerate_primary_key | self.regenerate_secondary_key + if self.regenerate_primary_key and not self.check_mode: + self.regenerate_sas_key('primary') + if self.regenerate_secondary_key and not self.check_mode: + self.regenerate_sas_key('secondary') + self.results = self.policy_to_dict(policy) + self.results['keys'] = self.get_sas_key() + elif policy: + changed = True + if not self.check_mode: + self.delete_sas_policy() + + self.results['changed'] = changed + return self.results + + def _get_client(self): + if self.queue: + return self.servicebus_client.queues + elif self.topic: + return self.servicebus_client.topics + return self.servicebus_client.namespaces + + # SAS policy + def create_sas_policy(self): + if self.rights == 'listen_send': + rights = ['Listen', 'Send'] + elif self.rights == 'manage': + rights = ['Listen', 'Send', 'Manage'] + else: + rights = [str.capitalize(self.rights)] + try: + client = self._get_client() + if self.queue or self.topic: + rule = client.create_or_update_authorization_rule(self.resource_group, + self.namespace, + self.queue or self.topic, + self.name, parameters={'rights': rights}) + else: + rule = client.create_or_update_authorization_rule(self.resource_group, self.namespace, self.name, parameters={'rights': rights}) + return rule + except Exception as exc: + self.fail('Error when creating or updating SAS policy {0} - {1}'.format(self.name, exc.message or str(exc))) + return None + + def get_auth_rule(self): + rule = None + try: + client = self._get_client() + if self.queue or self.topic: + rule = client.get_authorization_rule(self.resource_group, self.namespace, self.queue or self.topic, self.name) + else: + rule = client.get_authorization_rule(self.resource_group, self.namespace, self.name) + except Exception: + pass + return rule + + def delete_sas_policy(self): + try: + client = self._get_client() + if self.queue or self.topic: + client.delete_authorization_rule(self.resource_group, self.namespace, self.queue or 
self.topic, self.name) + else: + client.delete_authorization_rule(self.resource_group, self.namespace, self.name) + return True + except Exception as exc: + self.fail('Error when deleting SAS policy {0} - {1}'.format(self.name, exc.message or str(exc))) + + def regenerate_sas_key(self, key_type): + try: + client = self._get_client() + key = str.capitalize(key_type) + 'Key' + if self.queue or self.topic: + client.regenerate_keys(self.resource_group, self.namespace, self.queue or self.topic, self.name, key) + else: + client.regenerate_keys(self.resource_group, self.namespace, self.name, key) + except Exception as exc: + self.fail('Error when generating SAS policy {0}\'s key - {1}'.format(self.name, exc.message or str(exc))) + return None + + def get_sas_key(self): + try: + client = self._get_client() + if self.queue or self.topic: + return client.list_keys(self.resource_group, self.namespace, self.queue or self.topic, self.name).as_dict() + else: + return client.list_keys(self.resource_group, self.namespace, self.name).as_dict() + except Exception: + pass + return None + + def policy_to_dict(self, rule): + result = rule.as_dict() + rights = result['rights'] + if 'Manage' in rights: + result['rights'] = 'manage' + elif 'Listen' in rights and 'Send' in rights: + result['rights'] = 'listen_send' + else: + result['rights'] = rights[0].lower() + return result + + +def main(): + AzureRMServiceBusSASPolicy() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopic.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopic.py new file mode 100644 index 000000000..c2eb25e86 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopic.py @@ -0,0 +1,306 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_servicebustopic +version_added: "0.1.2" +short_description: Manage Azure Service Bus topic +description: + - Create, update or delete an Azure Service Bus topic. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the topic. + required: true + namespace: + description: + - Servicebus namespace name. + - A namespace is a scoping container for all messaging components. + - Multiple topics can reside within a single namespace. + required: true + state: + description: + - Assert the state of the topic. Use C(present) to create or update and use C(absent) to delete. + default: present + choices: + - absent + - present + auto_delete_on_idle_in_seconds: + description: + - Time idle interval after which a topic is automatically deleted. + - The minimum duration is 5 minutes. + type: int + default_message_time_to_live_seconds: + description: + - Default message timespan to live value. + - This is the duration after which the message expires, starting from when the message is sent to Service Bus. + - This is the default value used when TimeToLive is not set on a message itself. + type: int + enable_batched_operations: + description: + - Value that indicates whether server-side batched operations are enabled. + type: bool + enable_express: + description: + - Value that indicates whether Express Entities are enabled. + - An express topic holds a message in memory temporarily before writing it to persistent storage. + type: bool + enable_partitioning: + description: + - A value that indicates whether the topic is to be partitioned across multiple message brokers. + type: bool + max_message_size_in_kb: + description: + - Maximum size (in KB) of the message payload that can be accepted by the topic. + - This property is only used in Premium today and default is 1024. 
+ type: int + max_size_in_mb: + description: + - The maximum size of the topic in megabytes, which is the size of memory allocated for the topic. + type: int + requires_duplicate_detection: + description: + - A value indicating if this topic requires duplicate detection. + type: bool + duplicate_detection_time_in_seconds: + description: + - TimeSpan structure that defines the duration of the duplicate detection history. + type: int + support_ordering: + description: + - Value that indicates whether the topic supports ordering. + type: bool + status: + description: + - Status of the entity. + choices: + - active + - disabled + - send_disabled + - receive_disabled + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a topic + azure_rm_servicebustopic: + name: subtopic + resource_group: myResourceGroup + namespace: bar + duplicate_detection_time_in_seconds: 600 +''' +RETURN = ''' +id: + description: + - Current state of the topic. 
+ returned: success + type: str + sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/namespaces/nsb57dc95979/topics/topicb57dc95979" +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +from ansible.module_utils._text import to_native +from datetime import datetime, timedelta + + +duration_spec_map = dict( + default_message_time_to_live='default_message_time_to_live_seconds', + duplicate_detection_history_time_window='duplicate_detection_time_in_seconds', + auto_delete_on_idle='auto_delete_on_idle_in_seconds' +) + + +sas_policy_spec = dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + name=dict(type='str', required=True), + regenerate_key=dict(type='bool'), + rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send']) +) + + +class AzureRMServiceBusTopic(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + auto_delete_on_idle_in_seconds=dict(type='int'), + default_message_time_to_live_seconds=dict(type='int'), + duplicate_detection_time_in_seconds=dict(type='int'), + enable_batched_operations=dict(type='bool'), + enable_express=dict(type='bool'), + enable_partitioning=dict(type='bool'), + max_size_in_mb=dict(type='int'), + max_message_size_in_kb=dict(type='int'), + name=dict(type='str', required=True), + namespace=dict(type='str'), + requires_duplicate_detection=dict(type='bool'), + resource_group=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + status=dict(type='str', + choices=['active', 'disabled', 'send_disabled', 'receive_disabled']), + support_ordering=dict(type='bool') + ) + + self.resource_group = None + self.name = None + self.state = None + 
self.namespace = None + self.auto_delete_on_idle_in_seconds = None + self.default_message_time_to_live_seconds = None + self.enable_batched_operations = None + self.enable_express = None + self.enable_partitioning = None + self.max_size_in_mb = None + self.requires_duplicate_detection = None + self.status = None + self.support_ordering = None + self.max_message_size_in_kb = None + + self.results = dict( + changed=False, + id=None + ) + + super(AzureRMServiceBusTopic, self).__init__(self.module_arg_spec, + supports_tags=False, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + changed = False + original = self.get() + if self.state == 'present': + # Create the resource instance + params = dict( + enable_batched_operations=self.enable_batched_operations, + enable_express=self.enable_express, + enable_partitioning=self.enable_partitioning, + max_size_in_megabytes=self.max_size_in_mb, + max_message_size_in_kilobytes=self.max_message_size_in_kb, + support_ordering=self.support_ordering + ) + if self.status: + params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status))) + for k, v in duration_spec_map.items(): + seconds = getattr(self, v) + if seconds: + params[k] = timedelta(seconds=seconds) + + instance = self.servicebus_models.SBTopic(**params) + result = original + if not original: + changed = True + result = instance + else: + result = original + attribute_map = set(self.servicebus_models.SBTopic._attribute_map.keys()) - set(self.servicebus_models.SBTopic._validation.keys()) + for attribute in attribute_map: + value = getattr(instance, attribute) + if value and value != getattr(original, attribute): + changed = True + if changed and not self.check_mode: + result = self.create_or_update(instance) + self.results = self.to_dict(result) + elif original: + changed = True + if not self.check_mode: + self.delete() + 
self.results['deleted'] = True + + self.results['changed'] = changed + return self.results + + def create_or_update(self, param): + try: + client = self._get_client() + return client.create_or_update(self.resource_group, self.namespace, self.name, param) + except Exception as exc: + self.fail('Error creating or updating topic {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc))) + + def delete(self): + try: + client = self._get_client() + client.delete(self.resource_group, self.namespace, self.name) + return True + except Exception as exc: + self.fail("Error deleting topic {0} - {1}".format(self.name, str(exc))) + + def _get_client(self): + return self.servicebus_client.topics + + def get(self): + try: + client = self._get_client() + return client.get(self.resource_group, self.namespace, self.name) + except Exception: + return None + + def to_dict(self, instance): + result = dict() + attribute_map = self.servicebus_models.SBTopic._attribute_map + for attribute in attribute_map.keys(): + value = getattr(instance, attribute) + if not value: + continue + if attribute_map[attribute]['type'] == 'duration': + if is_valid_timedelta(value): + key = duration_spec_map.get(attribute) or attribute + result[key] = int(value.total_seconds()) + elif attribute == 'status': + result['status'] = _camel_to_snake(value) + elif isinstance(value, self.servicebus_models.MessageCountDetails): + result[attribute] = value.as_dict() + elif isinstance(value, self.servicebus_models.SBSku): + result[attribute] = value.name.lower() + elif isinstance(value, datetime): + result[attribute] = str(value) + elif isinstance(value, str): + result[attribute] = to_native(value) + elif attribute == 'max_size_in_megabytes': + result['max_size_in_mb'] = value + elif attribute == 'max_message_size_in_kilobyte': + result['max_message_size_in_kb'] = value + else: + result[attribute] = value + return result + + +def is_valid_timedelta(value): + if value == timedelta(10675199, 10085, 477581): + 
return None + return value + + +def main(): + AzureRMServiceBusTopic() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopicsubscription.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopicsubscription.py new file mode 100644 index 000000000..b6cc523db --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_servicebustopicsubscription.py @@ -0,0 +1,314 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yuwei Zhou, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_servicebustopicsubscription +version_added: "0.1.2" +short_description: Manage Azure Service Bus subscription +description: + - Create, update or delete an Azure Service Bus subscriptions. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the servicebus subscription. + required: true + state: + description: + - Assert the state of the servicebus subscription. Use C(present) to create or update and use C(absent) to delete. + default: present + choices: + - absent + - present + namespace: + description: + - Servicebus namespace name. + - A namespace is a scoping container for all messaging components. + - Multiple subscriptions and topics can reside within a single namespace, and namespaces often serve as application containers. + required: true + topic: + description: + - Topic name which the subscription subscribe to. + required: true + auto_delete_on_idle_in_seconds: + description: + - Time idle interval after which a subscription is automatically deleted. + - The minimum duration is 5 minutes. 
+        type: int
+    dead_lettering_on_message_expiration:
+        description:
+            - A value that indicates whether a subscription has dead letter support when a message expires.
+        type: bool
+    dead_lettering_on_filter_evaluation_exceptions:
+        description:
+            - Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
+        type: bool
+    default_message_time_to_live_seconds:
+        description:
+            - Default message timespan to live value.
+            - This is the duration after which the message expires, starting from when the message is sent to Service Bus.
+            - This is the default value used when TimeToLive is not set on a message itself.
+        type: int
+    enable_batched_operations:
+        description:
+            - Value that indicates whether server-side batched operations are enabled.
+        type: bool
+    forward_dead_lettered_messages_to:
+        description:
+            - Queue or topic name to forward the Dead Letter message for a subscription.
+    forward_to:
+        description:
+            - Queue or topic name to forward the messages for a subscription.
+    lock_duration_in_seconds:
+        description:
+            - Timespan duration of a peek-lock.
+            - The amount of time that the message is locked for other receivers.
+            - The maximum value for LockDuration is 5 minutes.
+        type: int
+    max_delivery_count:
+        description:
+            - The maximum delivery count.
+            - A message is automatically deadlettered after this number of deliveries.
+        type: int
+    requires_session:
+        description:
+            - A value that indicates whether the subscription supports the concept of sessions.
+        type: bool
+    duplicate_detection_time_in_seconds:
+        description:
+            - TimeSpan structure that defines the duration of the duplicate detection history.
+        type: int
+    status:
+        description:
+            - Status of the entity.
+ choices: + - active + - disabled + - send_disabled + - receive_disabled + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yuwei Zhou (@yuwzho) + +''' + +EXAMPLES = ''' +- name: Create a subscription + azure_rm_servicebustopicsubscription: + name: sbsub + resource_group: myResourceGroup + namespace: bar + topic: subtopic +''' +RETURN = ''' +id: + description: + - Current state of the subscription. + returned: success + type: str + sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/ + namespaces/nsb57dc95979/topics/topicb57dc95979/subscriptions/subsb57dc95979" +''' + +try: + from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake +from ansible.module_utils._text import to_native +from datetime import datetime, timedelta + + +duration_spec_map = dict( + default_message_time_to_live='default_message_time_to_live_seconds', + duplicate_detection_history_time_window='duplicate_detection_time_in_seconds', + auto_delete_on_idle='auto_delete_on_idle_in_seconds', + lock_duration='lock_duration_in_seconds' +) + + +class AzureRMServiceSubscription(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + auto_delete_on_idle_in_seconds=dict(type='int'), + dead_lettering_on_filter_evaluation_exceptions=dict(type='bool'), + dead_lettering_on_message_expiration=dict(type='bool'), + default_message_time_to_live_seconds=dict(type='int'), + duplicate_detection_time_in_seconds=dict(type='int'), + enable_batched_operations=dict(type='bool'), + forward_dead_lettered_messages_to=dict(type='str'), + forward_to=dict(type='str'), + lock_duration_in_seconds=dict(type='int'), + max_delivery_count=dict(type='int'), + name=dict(type='str', required=True), + namespace=dict(type='str', 
required=True), + requires_session=dict(type='bool'), + resource_group=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + status=dict(type='str', + choices=['active', 'disabled', 'send_disabled', 'receive_disabled']), + topic=dict(type='str', required=True) + ) + + self.auto_delete_on_idle_in_seconds = None + self.dead_lettering_on_filter_evaluation_exceptions = None + self.dead_lettering_on_message_expiration = None + self.default_message_time_to_live_seconds = None + self.duplicate_detection_time_in_seconds = None + self.enable_batched_operations = None + self.forward_dead_lettered_messages_to = None + self.forward_to = None + self.lock_duration_in_seconds = None + self.max_delivery_count = None + self.name = None + self.namespace = None + self.requires_session = None + self.resource_group = None + self.state = None + self.status = None + self.topic = None + + self.results = dict( + changed=False, + id=None + ) + + super(AzureRMServiceSubscription, self).__init__(self.module_arg_spec, + supports_tags=False, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + changed = False + + original = self.get() + if self.state == 'present': + # Create the resource instance + params = dict( + dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, + dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, + enable_batched_operations=self.enable_batched_operations, + forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, + forward_to=self.forward_to, + max_delivery_count=self.max_delivery_count, + requires_session=self.requires_session + ) + if self.status: + params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status))) + for k, v in duration_spec_map.items(): + seconds = getattr(self, v) + if 
seconds: + params[k] = timedelta(seconds=seconds) + + instance = self.servicebus_models.SBSubscription(**params) + result = original + if not original: + changed = True + result = instance + else: + result = original + attribute_map_keys = set(self.servicebus_models.SBSubscription._attribute_map.keys()) + validation_keys = set(self.servicebus_models.SBSubscription._validation.keys()) + attribute_map = attribute_map_keys - validation_keys + for attribute in attribute_map: + value = getattr(instance, attribute) + if value and value != getattr(original, attribute): + changed = True + if changed and not self.check_mode: + result = self.create_or_update(instance) + self.results = self.to_dict(result) + elif original: + changed = True + if not self.check_mode: + self.delete() + self.results['deleted'] = True + + self.results['changed'] = changed + return self.results + + def create_or_update(self, param): + try: + client = self._get_client() + return client.create_or_update(self.resource_group, self.namespace, self.topic, self.name, param) + except Exception as exc: + self.fail("Error creating or updating servicebus subscription {0} - {1}".format(self.name, str(exc))) + + def delete(self): + try: + client = self._get_client() + client.delete(self.resource_group, self.namespace, self.topic, self.name) + return True + except Exception as exc: + self.fail("Error deleting servicebus subscription {0} - {1}".format(self.name, str(exc))) + + def _get_client(self): + return self.servicebus_client.subscriptions + + def get(self): + try: + client = self._get_client() + return client.get(self.resource_group, self.namespace, self.topic, self.name) + except Exception: + return None + + def to_dict(self, instance): + result = dict() + attribute_map = self.servicebus_models.SBSubscription._attribute_map + for attribute in attribute_map.keys(): + value = getattr(instance, attribute) + if not value: + continue + if attribute_map[attribute]['type'] == 'duration': + if 
is_valid_timedelta(value): + key = duration_spec_map.get(attribute) or attribute + result[key] = int(value.total_seconds()) + elif attribute == 'status': + result['status'] = _camel_to_snake(value) + elif isinstance(value, self.servicebus_models.MessageCountDetails): + result[attribute] = value.as_dict() + elif isinstance(value, self.servicebus_models.SBSku): + result[attribute] = value.name.lower() + elif isinstance(value, datetime): + result[attribute] = str(value) + elif isinstance(value, str): + result[attribute] = to_native(value) + elif attribute == 'max_size_in_megabytes': + result['max_size_in_mb'] = value + else: + result[attribute] = value + return result + + +def is_valid_timedelta(value): + if value == timedelta(10675199, 10085, 477581): + return None + return value + + +def main(): + AzureRMServiceSubscription() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_snapshot.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_snapshot.py new file mode 100644 index 000000000..15c814414 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_snapshot.py @@ -0,0 +1,397 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_snapshot +version_added: "0.1.2" +short_description: Manage Azure Snapshot instance +description: + - Create, update and delete instance of Azure Snapshot. +options: + resource_group: + description: + - The name of the resource group. + required: true + type: str + name: + description: + - Resource name. + type: str + location: + description: + - Resource location. + type: str + incremental: + description: + - Whether a snapshot is incremental. 
+ - Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed. + type: bool + default: False + sku: + description: + - The snapshots SKU. + type: dict + suboptions: + name: + description: + - The sku name. + type: str + choices: + - Standard_LRS + - Premium_LRS + - Standard_ZRS + tier: + description: + - The sku tier. + type: str + os_type: + description: + - The Operating System type. + type: str + choices: + - Linux + - Windows + creation_data: + description: + - Disk source information. + - CreationData information cannot be changed after the disk has been created. + type: dict + suboptions: + create_option: + description: + - This enumerates the possible sources of a disk's creation. + type: str + choices: + - Import + - Copy + source_uri: + description: + - If I(create_option=Import), this is the URI of a blob to be imported into a managed disk. + type: str + source_id: + description: + - If I(create_option=Copy), this is the resource ID of a managed disk to be copied from. + type: str + state: + description: + - Assert the state of the Snapshot. + - Use C(present) to create or update an Snapshot and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create a snapshot by importing an unmanaged blob from the same subscription. + azure_rm_snapshot: + resource_group: myResourceGroup + name: mySnapshot + location: eastus + creation_data: + create_option: Import + source_uri: 'https://mystorageaccount.blob.core.windows.net/osimages/osimage.vhd' + +- name: Create a snapshot by copying an existing managed disk. 
+ azure_rm_snapshot: + resource_group: myResourceGroup + name: mySnapshot + location: eastus + creation_data: + create_option: Copy + source_id: '/subscriptions/sub123/resourceGroups/group123/providers/Microsoft.Compute/disks/disk123' +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/snapshots/mySnapshot +''' + +import time +import json +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +try: + from msrestazure.azure_exceptions import CloudError +except ImportError: + # this is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMSnapshots(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + updatable=False, + disposition='resourceGroupName', + required=True + ), + name=dict( + type='str', + updatable=False, + disposition='snapshotName', + required=True + ), + location=dict( + type='str', + updatable=False, + disposition='/' + ), + sku=dict( + type='dict', + disposition='/', + options=dict( + name=dict( + type='str', + choices=['Standard_LRS', + 'Premium_LRS', + 'Standard_ZRS'] + ), + tier=dict( + type='str' + ) + ) + ), + os_type=dict( + type='str', + disposition='/properties/osType', + choices=['Windows', + 'Linux'] + ), + incremental=dict(type='bool', default=False), + creation_data=dict( + type='dict', + disposition='/properties/creationData', + options=dict( + create_option=dict( + type='str', + disposition='createOption', + choices=['Import', 'Copy'], + ), + source_uri=dict( + type='str', + disposition='sourceUri', + purgeIfNone=True + ), + source_id=dict( + type='str', + disposition='sourceResourceId', + 
purgeIfNone=True + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.id = None + self.name = None + self.type = None + self.managed_by = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.state = None + self.url = None + self.status_code = [200, 201, 202] + self.to_do = Actions.NoAction + + self.body = {} + self.body['properties'] = dict() + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-03-01' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(AzureRMSnapshots, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == 'incremental': + self.body['properties']['incremental'] = kwargs[key] + else: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + resource_group = self.get_resource_group(self.resource_group) + + if 'location' not in self.body: + self.body['location'] = resource_group.location + + self.url = ('/subscriptions' + + '/{{ subscription_id }}' + + '/resourceGroups' + + '/{{ resource_group }}' + + '/providers' + + '/Microsoft.Compute' + + '/snapshots' + + '/{{ snapshot_name }}') + self.url = self.url.replace('{{ subscription_id }}', self.subscription_id) + self.url = self.url.replace('{{ resource_group }}', self.resource_group) + self.url = self.url.replace('{{ snapshot_name }}', self.name) + + old_response = self.get_resource() + + if not old_response: + self.log("Snapshot instance doesn't 
exist") + + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log('Snapshot instance already exists') + + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log('Need to Create / Update the Snapshot instance') + + if self.check_mode: + self.results['changed'] = True + return self.results + response = self.create_update_resource() + self.results['changed'] = True + self.log('Creation / Update done') + elif self.to_do == Actions.Delete: + self.log('Snapshot instance deleted') + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_resource() + + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_resource(): + time.sleep(20) + else: + self.log('Snapshot instance unchanged') + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_resource(self): + # self.log('Creating / Updating the Snapshot instance {0}'.format(self.)) + try: + response = self.mgmt_client.query(url=self.url, + method='PUT', + query_parameters=self.query_parameters, + header_parameters=self.header_parameters, + body=self.body, + expected_status_codes=self.status_code, + polling_timeout=600, + polling_interval=30) + except CloudError as exc: + self.log('Error attempting to create the Snapshot instance.') + self.fail('Error creating the 
Snapshot instance: {0}'.format(str(exc))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + def delete_resource(self): + # self.log('Deleting the Snapshot instance {0}'.format(self.)) + try: + response = self.mgmt_client.query(url=self.url, + method='DELETE', + query_parameters=self.query_parameters, + header_parameters=self.header_parameters, + body=None, + expected_status_codes=self.status_code, + polling_timeout=600, + polling_interval=30) + except CloudError as e: + self.log('Error attempting to delete the Snapshot instance.') + self.fail('Error deleting the Snapshot instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + # self.log('Checking if the Snapshot instance {0} is present'.format(self.)) + found = False + try: + response = self.mgmt_client.query(url=self.url, + method='GET', + query_parameters=self.query_parameters, + header_parameters=self.header_parameters, + body=None, + expected_status_codes=self.status_code, + polling_timeout=600, + polling_interval=30) + response = json.loads(response.text) + found = True + self.log("Response : {0}".format(response)) + # self.log("Snapshot instance : {0} found".format(response.name)) + except CloudError as e: + self.log('Did not find the Snapshot instance.') + if found is True: + return response + + return False + + +def main(): + AzureRMSnapshots() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase.py new file mode 100644 index 000000000..972125b2e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase.py @@ -0,0 +1,596 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqldatabase +version_added: "0.1.2" +short_description: Manage SQL Database instance +description: + - Create, update and delete instance of SQL Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database to be operated on (updated or created). + required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + collation: + description: + - The collation of the database. If not I(create_mode=default), this value is ignored. + create_mode: + description: + - Specifies the mode of database creation. + - C(default), regular database creation. + - C(copy), creates a database as a copy of an existing database. + - C(online_secondary)/C(non_readable_secondary), creates a database as a (readable or nonreadable) secondary replica of an existing database. + - C(point_in_time_restore), Creates a database by restoring a point in time backup of an existing database. + - C(recovery), Creates a database by restoring a geo-replicated backup. + - C(restore), Creates a database by restoring a backup of a deleted database. + - C(restore_long_term_retention_backup), Creates a database by restoring from a long term retention vault. + - C(copy), C(non_readable_secondary), C(online_secondary) and C(restore_long_term_retention_backup) are not supported for C(data_warehouse) edition. 
+ choices: + - 'copy' + - 'default' + - 'non_readable_secondary' + - 'online_secondary' + - 'point_in_time_restore' + - 'recovery' + - 'restore' + - 'restore_long_term_retention_backup' + source_database_id: + description: + - Required unless I(create_mode=default) or I(create_mode=restore_long_term_retention_backup). + - Specifies the resource ID of the source database. + source_database_deletion_date: + description: + - Required if I(create_mode=restore) and I(source_database_id) is the deleted database's original resource id when it existed (as + opposed to its current restorable dropped database ID), then this value is required. Specifies the time that the database was deleted. + restore_point_in_time: + description: + - Required if I(create_mode=point_in_time_restore), this value is required. If I(create_mode=restore), this value is optional. + - Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. + - Must be greater than or equal to the source database's earliestRestoreDate value. + recovery_services_recovery_point_resource_id: + description: + - Required if I(create_mode=restore_long_term_retention_backup), then this value is required. + - Specifies the resource ID of the recovery point to restore from. + edition: + description: + - (Deprecate)The edition of the database. The DatabaseEditions enumeration contains all the valid editions. + - This option will be deprecated in 2.11, use I(sku) instead. + - Cannot set C(sku) when this field set. + choices: + - 'web' + - 'business' + - 'basic' + - 'standard' + - 'premium' + - 'free' + - 'stretch' + - 'data_warehouse' + - 'system' + - 'system2' + sku: + description: + - The sku of the database. The DatabaseEditions enumeration contains all the valid sku. + - If I(create_mode=non_readable_secondary) or I(create_mode=online_secondary), this value is ignored. 
+ - To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities) + referred to by operationId:'Capabilities_ListByLocation'. + - Cannot set C(edition) when this field set. + suboptions: + name: + description: + - Name of the database SKU, typically, a letter + Number code, e.g. P3 + required: True + tier: + description: + - The tier or edition of the particular SKU, e.g. Basic, Premium + capacity: + description: + - Capacity of the particular SKU. + size: + description: + - Size of the particular SKU + family: + description: + - If the service has different generations of hardware, for the same SKU, then that can be used here + max_size_bytes: + description: + - The max size of the database expressed in bytes. + - If not I(create_mode=default), this value is ignored. + - To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities). + referred to by operationId:'Capabilities_ListByLocation'. + elastic_pool_name: + description: + - The name of the elastic pool the database is in. Not supported for I(edition=data_warehouse). + read_scale: + description: + - If the database is a geo-secondary, indicates whether read-only connections are allowed to this database or not. + - Not supported for I(edition=data_warehouse). + type: bool + default: False + sample_name: + description: + - Indicates the name of the sample schema to apply when creating this database. + - If not I(create_mode=default), this value is ignored. + - Not supported for I(edition=data_warehouse). + choices: + - 'adventure_works_lt' + zone_redundant: + description: + - Is this database is zone redundant? It means the replicas of this database will be spread across multiple availability zones. + type: bool + default: False + force_update: + description: + - SQL Database will be updated if given parameters differ from existing resource state. 
+ - To force SQL Database update in any circumstances set this parameter to True. + type: bool + state: + description: + - Assert the state of the SQL Database. Use C(present) to create or update an SQL Database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) SQL Database + azure_rm_sqldatabase: + resource_group: myResourceGroup + server_name: sqlcrudtest-5961 + name: testdb + location: eastus + + - name: Restore SQL Database + azure_rm_sqldatabase: + resource_group: myResourceGroup + server_name: sqlcrudtest-5961 + name: restoreddb + location: eastus + create_mode: restore + restorable_dropped_database_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/s + ervers/testsvr/restorableDroppedDatabases/testdb2,131444841315030000" + + - name: Create SQL Database in Copy Mode + azure_rm_sqldatabase: + resource_group: myResourceGroup + server_name: sqlcrudtest-5961 + name: copydb + location: eastus + create_mode: copy + source_database_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/tests + vr/databases/testdb" + + - name: Create (or update) SQL Database with SKU + azure_rm_sqldatabase: + resource_group: myResourceGroup + server_name: sqlcrudtest-5961 + name: testdb + location: eastus + sku: + name: S0 + +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-5961/databases/t + estdb" +database_id: + description: + - The ID of the database. + returned: always + type: str + sample: database_id +status: + description: + - The status of the database. 
def get_sku_name(edition):
    """Translate a legacy C(edition) value into a default SKU name.

    :param edition: edition string as accepted by the module (any case).
    :return: the matching SKU name string, or None for an unknown edition.
    """
    # Table-driven mapping of legacy editions to their modern default SKUs.
    edition_to_sku = {
        'FREE': 'Free',
        'SYSTEM': 'GP_Gen5_2',
        'BUSINESS': 'BC_Gen5_2',
        'SYSTEM2': 'BC_Gen5_2',
        'BASIC': 'Basic',
        'STANDARD': 'S1',
        'WEB': 'S1',
        'PREMIUM': 'P2',
        'STRETCH': 'DS100',
        'DATA_WAREHOUSE': 'DW100c',
    }
    return edition_to_sku.get(edition.upper())
+ type='str', + choices=['web', + 'business', + 'basic', + 'standard', + 'premium', + 'free', + 'stretch', + 'data_warehouse', + 'system', + 'system2'] + ), + sku=dict( + type='dict', + options=sku_spec + ), + max_size_bytes=dict( + type='str' + ), + elastic_pool_name=dict( + type='str' + ), + read_scale=dict( + type='bool', + default=False + ), + sample_name=dict( + type='str', + choices=['adventure_works_lt'] + ), + zone_redundant=dict( + type='bool', + default=False + ), + force_update=dict( + type='bool' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.parameters = dict() + self.tags = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMSqlDatabase, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "location": + self.parameters["location"] = kwargs[key] + elif key == "collation": + self.parameters["collation"] = kwargs[key] + elif key == "create_mode": + self.parameters["create_mode"] = _snake_to_camel( + kwargs[key], True) + elif key == "source_database_id": + self.parameters["source_database_id"] = kwargs[key] + elif key == "source_database_deletion_date": + try: + self.parameters["source_database_deletion_date"] = dateutil.parser.parse(kwargs[key]) + except dateutil.parser._parser.ParserError: + self.fail("Error parsing date from source_database_deletion_date: {0}".format(kwargs[key])) + elif key == "restore_point_in_time": + try: + self.parameters["restore_point_in_time"] = dateutil.parser.parse(kwargs[key]) + except dateutil.parser._parser.ParserError: + self.fail("Error parsing date 
from restore_point_in_time: {0}".format(kwargs[key])) + elif key == "recovery_services_recovery_point_resource_id": + self.parameters["recovery_services_recovery_point_resource_id"] = kwargs[key] + elif key == "edition": + ev = get_sku_name(kwargs[key]) + self.parameters["sku"] = Sku(name=ev) + elif key == "sku": + ev = kwargs[key] + self.parameters["sku"] = Sku( + name=ev['name'], tier=ev['tier'], size=ev['size'], family=ev['family'], capacity=ev['capacity']) + elif key == "max_size_bytes": + self.parameters["max_size_bytes"] = kwargs[key] + elif key == "elastic_pool_name": + self.parameters["elastic_pool_id"] = kwargs[key] + elif key == "read_scale": + self.parameters["read_scale"] = 'Enabled' if kwargs[key] else 'Disabled' + elif key == "sample_name": + ev = kwargs[key] + if ev == 'adventure_works_lt': + ev = 'AdventureWorksLT' + self.parameters["sample_name"] = ev + elif key == "zone_redundant": + self.parameters["zone_redundant"] = True if kwargs[key] else False + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + if "elastic_pool_id" in self.parameters: + self.format_elastic_pool_id() + + old_response = self.get_sqldatabase() + + if not old_response: + self.log("SQL Database instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("SQL Database instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log( + "Need to check if SQL Database instance has to be deleted or may be updated") + if ('location' in self.parameters) and (self.parameters['location'] != old_response['location']): + self.to_do = Actions.Update + if (('read_scale' in self.parameters) and + (self.parameters['read_scale'] != old_response['read_scale'])): + self.to_do = 
Actions.Update + if (('max_size_bytes' in self.parameters) and + (self.parameters['max_size_bytes'] != old_response['max_size_bytes'])): + self.to_do = Actions.Update + if (('sku' in self.parameters) and + (self.parameters['sku'].as_dict() != old_response['sku'])): + self.to_do = Actions.Update + update_tags, newtags = self.update_tags( + old_response.get('tags', dict())) + if update_tags: + self.tags = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the SQL Database instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + self.parameters['tags'] = self.tags + response = self.create_update_sqldatabase() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("SQL Database instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_sqldatabase() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_sqldatabase(): + time.sleep(20) + else: + self.log("SQL Database instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + self.results["database_id"] = response["database_id"] + self.results["status"] = response["status"] + + return self.results + + def create_update_sqldatabase(self): + ''' + Creates or updates SQL Database with the specified configuration. 
+ + :return: deserialized SQL Database instance state dictionary + ''' + self.log( + "Creating / Updating the SQL Database instance {0}".format(self.name)) + + try: + response = self.sql_client.databases.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the SQL Database instance.') + self.fail( + "Error creating the SQL Database instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_sqldatabase(self): + ''' + Deletes specified SQL Database instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the SQL Database instance {0}".format(self.name)) + try: + response = self.sql_client.databases.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to delete the SQL Database instance.') + self.fail( + "Error deleting the SQL Database instance: {0}".format(str(e))) + + return True + + def get_sqldatabase(self): + ''' + Gets the properties of the specified SQL Database. 
+ + :return: deserialized SQL Database instance state dictionary + ''' + self.log( + "Checking if the SQL Database instance {0} is present".format(self.name)) + found = False + try: + response = self.sql_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("SQL Database instance : {0} found".format(response.name)) + except ResourceNotFoundError: + self.log('Did not find the SQL Database instance.') + if found is True: + return response.as_dict() + + return False + + def format_elastic_pool_id(self): + parrent_id = format_resource_id(val=self.server_name, + subscription_id=self.subscription_id, + namespace="Microsoft.Sql", + types="servers", + resource_group=self.resource_group) + self.parameters['elastic_pool_id'] = parrent_id + \ + "/elasticPools/" + self.parameters['elastic_pool_id'] + + +def _snake_to_camel(snake, capitalize_first=False): + if capitalize_first: + return ''.join(x.capitalize() or '_' for x in snake.split('_')) + else: + return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:]) + + +def main(): + """Main execution""" + AzureRMSqlDatabase() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase_info.py new file mode 100644 index 000000000..9c9af2501 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqldatabase_info.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqldatabase_info +version_added: "0.1.2" 
+short_description: Get Azure SQL Database facts +description: + - Get facts of Azure SQL Database. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the database. + elastic_pool_name: + description: + - The name of the elastic pool. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of SQL Database + azure_rm_sqldatabase_info: + resource_group: testrg + server_name: testserver + name: testdb + + - name: List instances of SQL Database + azure_rm_sqldatabase_info: + resource_group: testrg + server_name: testserver + elastic_pool_name: testep + + - name: List instances of SQL Database + azure_rm_sqldatabase_info: + resource_group: testrg + server_name: testserver +''' + +RETURN = ''' +databases: + description: + - A list of dictionaries containing facts for SQL Database. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.Sql/servers/testserver/databases/testdb + name: + description: + - Database name. + returned: always + type: str + sample: testdb + location: + description: + - Resource location. + returned: always + type: str + sample: southeastasia + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'taga':'aaa', 'tagb':'bbb' } + sku: + description: + - The name and tier of the SKU. 
+ returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: BC_Gen4_2 + tier: + description: + - The SKU tier. + returned: always + type: str + sample: BusinessCritical + capacity: + description: + - The SKU capacity. + returned: always + type: int + sample: 2 + kind: + description: + - Kind of database. This is metadata used for the Azure portal experience. + returned: always + type: str + sample: v12.0,user + collation: + description: + - The collation of the database. + returned: always + type: str + sample: SQL_Latin1_General_CP1_CI_AS + status: + description: + - The status of the database. + returned: always + type: str + sample: Online + zone_redundant: + description: + - Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. + returned: always + type: bool + sample: true + earliest_restore_date: + description: + - The earliest restore point available for the SQL Database. 
+ returned: always + type: str + sample: '2021-09-01T00:59:59.000Z' +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSqlDatabaseInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + elastic_pool_name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + self.elastic_pool_name = None + self.tags = None + super(AzureRMSqlDatabaseInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_sqldatabase_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_sqldatabase_facts' module has been renamed to 'azure_rm_sqldatabase_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['databases'] = self.get() + elif self.elastic_pool_name is not None: + self.results['databases'] = self.list_by_elastic_pool() + else: + self.results['databases'] = self.list_by_server() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.sql_client.databases.get(resource_group_name=self.resource_group, + server_name=self.server_name, + database_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError: + self.log('Could not get facts for Databases.') + + if response and 
self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_elastic_pool(self): + response = None + results = [] + try: + response = self.sql_client.databases.list_by_elastic_pool(resource_group_name=self.resource_group, + server_name=self.server_name, + elastic_pool_name=self.elastic_pool_name) + self.log("Response : {0}".format(response)) + except Exception: + self.fail('Could not get facts for Databases.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.sql_client.databases.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception: + self.fail('Could not get facts for Databases.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d.get('id', None), + 'name': d.get('name', None), + 'location': d.get('location', None), + 'tags': d.get('tags', None), + 'sku': { + 'name': d.get('current_service_objective_name', None), + 'tier': d.get('sku', {}).get('tier', None), + 'capacity': d.get('sku', {}).get('capacity', None) + }, + 'kind': d.get('kind', None), + 'collation': d.get('collation', None), + 'status': d.get('status', None), + 'zone_redundant': d.get('zone_redundant', None), + 'earliest_restore_date': d.get('earliest_restore_date', None) + } + return d + + +def main(): + AzureRMSqlDatabaseInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool.py 
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool.py new file mode 100644 index 000000000..563e9111b --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool.py @@ -0,0 +1,558 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlelasticpool +version_added: "1.14.0" +short_description: Manage SQL Elastic Pool instance +description: + - Create, update and delete instance of SQL Elastic Pool. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. + - You can obtain this value from the Azure Resource Manager API or the portal. + required: True + type: str + server_name: + description: + - The name of the server. + required: True + type: str + name: + description: + - The name of the elastic pool to be operated on (updated or created). + required: True + type: str + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + type: str + sku: + description: + - The sku of the elastic pool. The Elastic PoolEditions enumeration contains all the valid sku. + type: dict + suboptions: + name: + description: + - Name of the elastic pool SKU, typically, a letter + Number code, e.g. P3 + required: True + type: str + tier: + description: + - The tier or edition of the particular SKU, e.g. Basic, Premium + type: str + capacity: + description: + - Capacity of the particular SKU. 
+ type: int + size: + description: + - Size of the particular SKU + type: str + family: + description: + - If the service has different generations of hardware, for the same SKU, then that can be used here + type: str + max_size_bytes: + description: + - The max size of the elasticpool expressed in bytes. + - If not I(create_mode=default), this value is ignored. + - To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities). + referred to by operationId:'Capabilities_ListByLocation'. + type: str + zone_redundant: + description: + - Is this elasticpool is zone redundant? It means the replicas of this elasticpool will be spread across multiple availability zones. + type: bool + default: False + per_elasticpool_settings: + description: + - The per database settings for the elastic pool. + type: dict + suboptions: + min_capacity: + description: + - The minimum capacity all databases are guaranteed. + type: float + max_capacity: + description: + - The maximum capacity all databases are guaranteed. + type: float + license_type: + description: + - The license type to apply for this elastic pool. + type: str + default: LicenseIncluded + choices: + - LicenseIncluded + - BasePrice + maintenance_configuration_id: + description: + - Maintenance configuration id assigned to the elastic pool. + type: str + state: + description: + - Assert the state of the SQL Elastic Pool. Use C(present) to create or update an SQL Elastic Pool and C(absent) to delete it. 
+ default: present
+ type: str
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) SQL Elastic Pool
+ azure_rm_sqlelasticpool:
+ resource_group: myResourceGroup
+ server_name: sqlcrudtest-5961
+ name: testEP
+ zone_redundant: False
+ sku:
+ name: GP_Gen5
+ family: Gen5
+ capacity: 3
+ tags:
+ key1: value1
+
+ - name: Delete SQL Elastic Pool
+ azure_rm_sqlelasticpool:
+ resource_group: myResourceGroup
+ server_name: sqlcrudtest-5961
+ name: testEP
+ state: absent
+'''
+
+RETURN = '''
+elastic_pool:
+ description:
+ - A list of dictionaries containing facts for SQL Elastic Pool.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxx-xxx/resourceGroups/testrg/providers/Microsoft.Sql/servers/sqlsrvfredsqldb/elasticPools/fedelastic01
+ name:
+ description:
+ - Elastic Pool name.
+ returned: always
+ type: str
+ sample: testEP
+ location:
+ description:
+ - Resource location.
+ returned: always
+ type: str
+ sample: eastus
+ tags:
+ description:
+ - Resource tags.
+ returned: always
+ type: dict
+ sample: { 'taga':'aaa', 'tagb':'bbb' }
+ sku:
+ description:
+ - The name and tier of the SKU.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - The name of the SKU.
+ returned: always
+ type: str
+ sample: GP_Gen5
+ tier:
+ description:
+ - The SKU tier.
+ returned: always
+ type: str
+ sample: GeneralPurpose
+ capacity:
+ description:
+ - The SKU capacity.
+ returned: always
+ type: int
+ sample: 2
+ family:
+ description:
+ - If the service has different generations of hardware, for the same SKU, then that can be captured here.
+ type: str
+ returned: always
+ sample: Gen5
+ size:
+ description:
+ - Size of the particular SKU.
+ type: str + returned: always + sample: null + zone_redundant: + description: + - Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. + returned: always + type: bool + sample: true + license_type: + description: + - The license type to apply for this elastic pool. + type: str + returned: always + sample: LicenseIncluded + maintenance_configuration_id: + description: + - Maintenance configuration id assigned to the elastic pool. + type: str + returned: always + sample: /subscriptions/xxx-xxx/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_Default + max_size_bytes: + description: + - The storage limit for the database elastic pool in bytes. + type: str + returned: always + sample: 34359738368 + per_database_settings: + description: + - The per database settings for the elastic pool. + type: complex + returned: always + contains: + min_capacity: + description: + - The minimum capacity all databases are guaranteed + type: float + returned: always + sample: 0.0 + max_capacity: + description: + - The maximum capacity any one database can consume. 
+ type: float + returned: always + sample: 2.0 +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + +sku_spec = dict( + name=dict(type='str', required=True), + tier=dict(type='str'), + size=dict(type='str'), + family=dict(type='str'), + capacity=dict(type='int') +) + + +per_elasticpool_settings_spec = dict( + min_capacity=dict(type='float'), + max_capacity=dict(type='float') +) + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMSqlElasticPool(AzureRMModuleBase): + """Configuration class for an Azure RM SQL Elastic Pool resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + sku=dict( + type='dict', + options=sku_spec + ), + max_size_bytes=dict( + type='str' + ), + zone_redundant=dict( + type='bool', + default=False + ), + per_elasticpool_settings=dict( + type='dict', + options=per_elasticpool_settings_spec + ), + maintenance_configuration_id=dict( + type='str' + ), + license_type=dict( + type='str', + default="LicenseIncluded", + choices=["LicenseIncluded", "BasePrice"] + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.location = None + self.sku = None + self.max_size_bytes = None + self.zone_redundant = None + self.per_elasticpool_settings = None + self.maintenance_configuration_id = None + self.body = dict() + + self.results = dict(changed=False) + self.to_do = Actions.NoAction + + super(AzureRMSqlElasticPool, 
self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + if key not in ['resource_group', 'server_name', 'name', 'state']: + self.body[key] = kwargs[key] + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + old_response = None + response = None + + old_response = self.get_elastic_pool() + + if not old_response: + self.log("SQL Elastic Pool instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("SQL Elastic Pool instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log( + "Need to check if SQL Elastic Pool instance has to be deleted or may be updated") + if self.per_elasticpool_settings is not None and (self.body['per_elasticpool_settings'] != old_response['per_elasticpool_settings']): + self.to_do = Actions.Update + if self.maintenance_configuration_id and (self.body['maintenance_configuration_id'] != old_response['maintenance_configuration_id']): + self.to_do = Actions.Update + if self.license_type is not None and (self.body['license_type'] != old_response['license_type']): + self.to_do = Actions.Update + if self.zone_redundant is not None and (bool(self.body['zone_redundant']) != bool(old_response['zone_redundant'])): + self.to_do = Actions.Update + if self.max_size_bytes is not None and (self.body['max_size_bytes'] != old_response['max_size_bytes']): + self.to_do = Actions.Update + if self.sku is not None: + for key in self.sku.keys(): + if self.sku[key] is not None and (self.sku[key] != old_response['sku'][key]): + self.to_do = Actions.Update + else: + self.sku[key] = 
old_response['sku'][key] + update_tags, newtags = self.update_tags(old_response.get('tags', dict())) + if update_tags: + self.body['tags'] = newtags + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the SQL Elastic Pool instance") + + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.to_do == Actions.Create: + self.body['location'] = self.location + response = self.create_elastic_pool(self.body) + else: + response = self.update_elastic_pool(self.body) + + elif self.to_do == Actions.Delete: + self.log("SQL Elastic Pool instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_elastic_pool() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_elastic_pool(): + time.sleep(20) + else: + self.log("SQL Elastic Pool instance unchanged") + self.results['changed'] = False + response = old_response + + self.results['elastic_pool'] = response + + return self.results + + def update_elastic_pool(self, parameters): + ''' + Creates or updates SQL Elastic Pool with the specified configuration. 
+ + :return: deserialized SQL Elastic Pool instance state dictionary + ''' + self.log( + "Creating / Updating the SQL Elastic Pool instance {0}".format(self.name)) + + try: + response = self.sql_client.elastic_pools.begin_update(resource_group_name=self.resource_group, + server_name=self.server_name, + elastic_pool_name=self.name, + parameters=parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the SQL Elastic Pool instance.') + self.fail( + "Error creating the SQL Elastic Pool instance: {0}".format(str(exc))) + return self.format_item(response) + + def create_elastic_pool(self, parameters): + ''' + Creates or updates SQL Elastic Pool with the specified configuration. + + :return: deserialized SQL Elastic Pool instance state dictionary + ''' + self.log( + "Creating / Updating the SQL Elastic Pool instance {0}".format(self.name)) + + try: + response = self.sql_client.elastic_pools.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + elastic_pool_name=self.name, + parameters=parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the SQL Elastic Pool instance.') + self.fail( + "Error creating the SQL Elastic Pool instance: {0}".format(str(exc))) + return self.format_item(response) + + def delete_elastic_pool(self): + ''' + Deletes specified SQL Elastic Pool instance in the specified subscription and resource group. 
+ + :return: True + ''' + self.log("Deleting the SQL Elastic Pool instance {0}".format(self.name)) + try: + response = self.sql_client.elastic_pools.begin_delete(resource_group_name=self.resource_group, + server_name=self.server_name, + elastic_pool_name=self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to delete the SQL Elastic Pool instance.') + self.fail( + "Error deleting the SQL Elastic Pool instance: {0}".format(str(e))) + + return True + + def get_elastic_pool(self): + ''' + Gets the properties of the specified SQL Elastic Pool. + + :return: deserialized SQL Elastic Pool instance state dictionary + ''' + found = False + try: + response = self.sql_client.elastic_pools.get(resource_group_name=self.resource_group, + server_name=self.server_name, + elastic_pool_name=self.name) + found = True + except ResourceNotFoundError: + self.log('Did not find the SQL Elastic Pool instance.') + if found is True: + return self.format_item(response) + + return False + + def format_item(self, item): + if not item: + return None + + d = dict( + resource_group=self.resource_group, + id=item.id, + name=item.name, + location=item.location, + tags=item.tags, + max_size_bytes=item.max_size_bytes, + zone_redundant=item.zone_redundant, + license_type=item.license_type, + maintenance_configuration_id=item.maintenance_configuration_id, + per_database_settings=dict(), + sku=dict() + ) + + if item.sku is not None: + d['sku']['name'] = item.sku.name + d['sku']['tier'] = item.sku.tier + d['sku']['size'] = item.sku.size + d['sku']['family'] = item.sku.family + d['sku']['capacity'] = item.sku.capacity + if item.per_database_settings is not None: + d['per_database_settings']['min_capacity'] = item.per_database_settings.min_capacity + d['per_database_settings']['max_capacity'] = item.per_database_settings.max_capacity + + return d + + +def main(): + """Main execution""" + AzureRMSqlElasticPool() + 
+ +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool_info.py new file mode 100644 index 000000000..4ac9ac86e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlelasticpool_info.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlelasticpool_info +version_added: "1.14.0" +short_description: Get Azure SQL Elastic Pool facts +description: + - Get facts of Azure SQL Elastic Pool. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. + - You can obtain this value from the Azure Resource Manager API or the portal. + type: str + required: True + server_name: + description: + - The name of the server. + type: str + required: True + name: + description: + - The name of the elastic pool. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred Sun (@Fred-sun) + +''' + +EXAMPLES = ''' + - name: Get instance of SQL Elastic Pool + azure_rm_sqlelasticpool_info: + resource_group: testrg + server_name: testserver + name: testEP + + - name: List instances of SQL Elastic Pool + azure_rm_sqlelasticpool_info: + resource_group: testrg + server_name: testserver +''' + +RETURN = ''' +elastic_pool: + description: + - A list of dictionaries containing facts for SQL Elastic Pool. 
+ returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/testrg/providers/Microsoft.Sql/servers/sqlsrvfredsqldb/elasticPools/fedelastic01 + name: + description: + - Elastic Pool name. + returned: always + type: str + sample: testEP + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'taga':'aaa', 'tagb':'bbb' } + sku: + description: + - The name and tier of the SKU. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: GP_Gen5 + tier: + description: + - The SKU tier. + returned: always + type: str + sample: GeneralPurpose + capacity: + description: + - The SKU capacity. + returned: always + type: int + sample: 2 + family: + description: + - If the service has different generations of hardware, for the same SKU, then that can be captured here. + type: str + returned: always + sample: Gen5 + size: + description: + - Size of the particular SKU. + type: str + returned: always + sample: null + zone_redundant: + description: + - Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. + returned: always + type: bool + sample: true + license_type: + description: + - The license type to apply for this elastic pool. + type: str + returned: always + sample: LicenseIncluded + maintenance_configuration_id: + description: + - Maintenance configuration id assigned to the elastic pool. + type: str + returned: always + sample: /subscriptions/xxx-xxx/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_Default + max_size_bytes: + description: + - The storage limit for the database elastic pool in bytes. 
type: int
self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_server(self): + response = None + results = [] + try: + response = self.sql_client.elastic_pools.list_by_server(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except Exception: + self.fail('Could not get facts for elastic pool.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + if not item: + return None + + d = dict( + resource_group=self.resource_group, + id=item.id, + name=item.name, + location=item.location, + tags=item.tags, + max_size_bytes=item.max_size_bytes, + zone_redundant=item.zone_redundant, + license_type=item.license_type, + maintenance_configuration_id=item.maintenance_configuration_id, + per_database_settings=dict(), + sku=dict() + ) + + if item.sku is not None: + d['sku']['name'] = item.sku.name + d['sku']['tier'] = item.sku.tier + d['sku']['size'] = item.sku.size + d['sku']['family'] = item.sku.family + d['sku']['capacity'] = item.sku.capacity + if item.per_database_settings is not None: + d['per_database_settings']['min_capacity'] = item.per_database_settings.min_capacity + d['per_database_settings']['max_capacity'] = item.per_database_settings.max_capacity + + return d + + +def main(): + AzureRMSqlElasticPoolInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule.py new file mode 100644 index 000000000..059256a07 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlfirewallrule +version_added: "0.1.2" +short_description: Manage Firewall Rule instance +description: + - Create, update and delete instance of Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the firewall rule. + required: True + start_ip_address: + description: + - The start IP address of the firewall rule. + - Must be IPv4 format. Use value C(0.0.0.0) to represent all Azure-internal IP addresses. + end_ip_address: + description: + - The end IP address of the firewall rule. + - Must be IPv4 format. Must be greater than or equal to I(start_ip_address). Use value C(0.0.0.0) to represent all Azure-internal IP addresses. + state: + description: + - State of the SQL Database. Use C(present) to create or update an SQL Database and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Create (or update) Firewall Rule + azure_rm_sqlfirewallrule: + resource_group: myResourceGroup + server_name: firewallrulecrudtest-6285 + name: firewallrulecrudtest-5370 + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 +''' + +RETURN = ''' +id: + description: + - Resource ID. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/firewallrulecrudtest-628 + 5/firewallRules/firewallrulecrudtest-5370" +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from azure.mgmt.sql.models import FirewallRule +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMSqlFirewallRule(AzureRMModuleBase): + """Configuration class for an Azure RM Firewall Rule resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + start_ip_address=dict( + type='str' + ), + end_ip_address=dict( + type='str' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.server_name = None + self.name = None + self.start_ip_address = None + self.end_ip_address = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMSqlFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + old_response = self.get_firewallrule() + response = None + + if not old_response: + self.log("Firewall Rule instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't exist") + else: + self.to_do = Actions.Create + else: + self.log("Firewall Rule instance already 
exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if Firewall Rule instance has to be deleted or may be updated") + if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']): + self.to_do = Actions.Update + if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the Firewall Rule instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + response = self.create_update_firewallrule() + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("Firewall Rule instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_firewallrule() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_firewallrule(): + time.sleep(20) + else: + self.log("Firewall Rule instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results["id"] = response["id"] + + return self.results + + def create_update_firewallrule(self): + ''' + Creates or updates Firewall Rule with the specified configuration. 
+ + :return: deserialized Firewall Rule instance state dictionary + ''' + self.log("Creating / Updating the Firewall Rule instance {0}".format(self.name)) + + try: + params = FirewallRule( + name=self.name, + start_ip_address=self.start_ip_address, + end_ip_address=self.end_ip_address, + ) + response = self.sql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name, + parameters=params) + except Exception as exc: + self.log('Error attempting to create the Firewall Rule instance.') + self.fail("Error creating the Firewall Rule instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_firewallrule(self): + ''' + Deletes specified Firewall Rule instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Firewall Rule instance {0}".format(self.name)) + try: + self.sql_client.firewall_rules.delete(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + except Exception as e: + self.log('Error attempting to delete the Firewall Rule instance.') + self.fail("Error deleting the Firewall Rule instance: {0}".format(str(e))) + + return True + + def get_firewallrule(self): + ''' + Gets the properties of the specified Firewall Rule. 
+ + :return: deserialized Firewall Rule instance state dictionary + ''' + self.log("Checking if the Firewall Rule instance {0} is present".format(self.name)) + found = False + try: + response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group, + server_name=self.server_name, + firewall_rule_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("Firewall Rule instance : {0} found".format(response.name)) + except ResourceNotFoundError: + self.log('Did not find the Firewall Rule instance.') + if found is True: + return response.as_dict() + + return False + + +def main(): + """Main execution""" + AzureRMSqlFirewallRule() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule_info.py new file mode 100644 index 000000000..7c9bb0459 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlfirewallrule_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlfirewallrule_info +version_added: "0.1.2" +short_description: Get Azure SQL Firewall Rule facts +description: + - Get facts of SQL Firewall Rule. + +options: + resource_group: + description: + - The name of the resource group that contains the server. + required: True + server_name: + description: + - The name of the server. + required: True + name: + description: + - The name of the firewall rule. 
+ +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of SQL Firewall Rule + azure_rm_sqlfirewallrule_info: + resource_group: myResourceGroup + server_name: testserver + name: testrule + + - name: List instances of SQL Firewall Rule + azure_rm_sqlfirewallrule_info: + resource_group: myResourceGroup + server_name: testserver +''' + +RETURN = ''' +rules: + description: + - A list of dict results containing the facts for matching SQL firewall rules. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/testser + ver/firewallRules/testrule" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: testgroup + server_name: + description: + - SQL server name. + returned: always + type: str + sample: testserver + name: + description: + - Firewall rule name. + returned: always + type: str + sample: testrule + start_ip_address: + description: + - The start IP address of the firewall rule. + returned: always + type: str + sample: 10.0.0.1 + end_ip_address: + description: + - The end IP address of the firewall rule. 
+ returned: always + type: str + sample: 10.0.0.5 +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSqlFirewallRuleInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.server_name = None + self.name = None + super(AzureRMSqlFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_sqlfirewallrule_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_sqlfirewallrule_facts' module has been renamed to 'azure_rm_sqlfirewallrule_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.name is not None): + self.results['rules'] = self.get() + else: + self.results['rules'] = self.list_by_server() + return self.results + + def get(self): + ''' + Gets facts of the specified SQL Firewall Rule. 
:return: deserialized SQL Firewall Rule instance state dictionary
import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlmanagedinstance +version_added: '1.14.0' +short_description: Manage SQL managed instances +description: + - Create, update, or delete SQL managed instances. + +options: + resource_group: + description: + - The name of the resource group. + type: str + required: True + name: + description: + - The name of the sql managed instance. + type: str + required: True + location: + description: + - The location of the sql managed instance. + type: str + sku: + description: + - An ARM Resource SKU. + type: dict + suboptions: + name: + description: + - The name of the SKU, typically, a letter add Number code. + type: str + tier: + description: + - The tier or edition of the particular SKU. + type: str + size: + description: + - Size of the particular SKU. + type: str + family: + description: + - If the service has different generations of hardware, for the same SKU, then that can be captured here. + type: str + capacity: + description: + - The capacity of the managed instance in integer number of vcores. + type: str + administrators: + description: + - The Azure Active Directory administrator of the server. + type: str + identity: + description: + - Azure Active Directory identity configuration for a resource. + type: dict + suboptions: + user_assigned_identities: + description: + - The resource ids of the user assigned identities to use. + type: str + principal_id: + description: + - The Azure Active Directory principal ID. + type: str + type: + description: + - The identity type. + - Set this to C(SystemAssigned) in order to automatically create and assign an Azure Active Directory principal for the resource. + type: str + tenant_id: + description: + - The Azure Active Directory tenant id. + type: str + managed_instance_create_mode: + description: + - Specifies the mode of database creation. 
+ type: str + administrator_login: + description: + - Administrator username for the managed instance. + - Can only be specified when the managed instance is being created (and is required for creation). + type: str + administrator_login_password: + description: + - The administrator login password (required for managed instance creation). + type: str + subnet_id: + description: + - Subnet resource ID for the managed instance. + type: str + license_type: + description: + - The license type. + - Possible values are C(LicenseIncluded) and C(BasePrice). + - Discounted AHB price for bringing your own SQL licenses. + - Regular price inclusive of a new SQL license. + type: str + choices: + - LicenseIncluded + - BasePrice + v_cores: + description: + - The number of vCores. + type: int + choices: + - 8 + - 16 + - 24 + - 32 + - 40 + - 64 + - 80 + storage_size_in_gb: + description: + - Storage size in GB. + - Minimum value is C(32). Maximum value is C(8192). + - Increments of 32 GB allowed only. + type: int + collation: + description: + - Collation of the managed instance. + type: str + dns_zone: + description: + - The Dns Zone that the managed instance is in. + type: str + dns_zone_partner: + description: + - The resource ID of another managed instance whose DNS zone this managed instance will share after creation. + type: str + public_data_endpoint_enabled: + description: + - Whether or not the public data endpoint is enabled. + type: bool + source_managed_instance_id: + description: + - The resource identifier of the source managed instance associated with create operation of this instance. + type: str + restore_point_in_time: + description: + - Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. + type: str + proxy_override: + description: + - Connection type used for connecting to the instance. + type: str + choices: + - Proxy + - Redirect + - Default + timezone_id: + description: + - ID of the timezone. 
+ - Allowed values are timezones supported by Windows. + - Windows keeps details on supported timezones. + type: str + instance_pool_id: + description: + - The ID of the instance pool this managed server belongs to. + type: str + private_endpoint_connections: + description: + - List of private endpoint connections on a managed instance. + type: list + elements: str + maintenance_configuration_id: + description: + - Specifies maintenance configuration ID to apply to this managed instance. + type: str + minimal_tls_version: + description: + - Minimal TLS version. Allowed values C(None), C(1.0), C(1.1), C(1.2). + type: str + choices: + - 'None' + - '1.0' + - '1.1' + - '1.2' + storage_account_type: + description: + - The storage account type used to store backups for this instance. + type: str + zone_redundant: + description: + - Whether or not the multi-az is enabled. + type: bool + primary_user_assigned_identity_id: + description: + - The resource id of a user assigned identity to be used by default. + type: str + key_id: + description: + - A CMK URI of the key to use for encryption. + type: str + state: + description: + - State of the sql managed instance. + - Use C(present) to create or update a automation runbook and use C(absent) to delete. 
+ type: str + default: present + choices: + - present + - absent + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - xuzhang3 (@xuzhang3) + - Fred Sun (@Fred-sun) +''' + +EXAMPLES = ''' +- name: Create sql managed instance + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: testmanagedinstance + subnet_id: subnet_id + sku: + name: GP_Gen5 + tier: GeneralPurpose + family: Gen5 + capacity: 5 + identity: + type: SystemAssigned + administrator_login: azureuser + administrator_login_password: Ft@password0329test + storage_size_in_gb: 256 + v_cores: 8 + +- name: Delete sql managed instance + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: testmanagedinstance + state: absent +''' + +RETURN = ''' +sql_managed_instance: + description: + - A list of dictionaries containing facts for SQL Managed Instance. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscription/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/managedInstances/fredsqlinstance" + name: + description: + - SQL manged instance name. + returned: always + type: str + sample: testmanagedinstance + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'taga':'aaa', 'tagb':'bbb' } + identity: + description: + - Azure Active Directory identity configuration for a resource. + returned: always + type: complex + contains: + principal_id: + description: + - The Azure Active Directory principal ID. + type: str + returned: always + sample: 895c-xxx-xxxbe + tenant_id: + description: + - The Azure Active Directory tenant ID. + type: str + returned: always + sample: 72fxxxxx-xxxx-xxxx-xxxx-xxxxxx11db47 + type: + description: + - The identity type. 
+ type: str + returned: always + sample: SystemAssigned + user_assigned_identities: + description: + - The resource ids of the user assigned identities to use. + type: str + returned: always + sample: null + sku: + description: + - An ARM Resource SKU. + returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: BC_Gen4_2 + tier: + description: + - The SKU tier. + returned: always + type: str + sample: BusinessCritical + capacity: + description: + - The SKU capacity. + returned: always + type: int + sample: 2 + family: + description: + - If the service has different generations of hardware, for the same SKU, then that can be captured here. + type: str + returned: always + sample: Gen5 + size: + description: + - Size of the particular SKU. + type: str + returned: always + sample: null + collation: + description: + - The collation of the SQL managed instance. + returned: always + type: str + sample: SQL_Latin1_General_CP1_CI_AS + administrator_login: + description: + - Administrator username for the managed instance. + type: str + returned: always + sample: azureuser + administrators: + description: + - The Azure Active Directory administrator of the server. + type: str + returned: always + sample: null + dns_zone: + description: + -The Dns Zone that the managed instance is in. + type: str + returned: always + sample: 8a23abba54cd + dns_zone_partner: + description: + - The resource ID of another managed instance whose DNS zone this managed instance will share after creation. + type: str + returned: always + sample: null + fully_qualified_domain_name: + description: + - The fully qualified domain name of the managed instance. + type: str + returned: always + sample: fredsqlinstance.8a23abba54cd.database.windows.net + instance_pool_id: + description: + - The ID of the instance pool this managed server belongs to. 
+ type: str + returned: always + sample: null + key_id: + description: + - A CMK URI of the key to use for encryption. + type: str + returned: always + sample: null + license_type: + description: + - The license type. + type: str + returned: always + sample: LicenseIncluded + maintenance_configuration_id: + description: + - Specifies maintenance configuration ID to apply to this managed instance. + type: str + returned: always + sample: /subscriptions/xxx-xxxx/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_Default + managed_instance_create_mode: + description: + - Specifies the mode of database creation. + type: str + returned: always + sample: null + minimal_tls_version: + description: + - Minimal TLS version. Allowed values 'None', '1.0', '1.1', '1.2'. + type: str + returned: always + sample: 1.2 + primary_user_assigned_identity_id: + description: + - The resource id of a user assigned identity to be used by default. + type: str + returned: always + sample: null + private_endpoint_connections: + description: + - List of private endpoint connections on a managed instance. + type: list + returned: always + sample: [] + provisioning_state: + description: + - The Status of the SQL managed instance. + type: str + returned: always + sample: Successed + proxy_override: + description: + - Connection type used for connecting to the instance. + type: str + returned: always + sample: Proxy + public_data_endpoint_enabled: + description: + - Whether or not the public data endpoint is enabled. + type: bool + returned: always + sample: false + restore_point_in_time: + description: + - Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. + type: str + returned: always + sample: null + source_managed_instance_id: + description: + - The resource identifier of the source managed instance associated with create operation of this instance. 
+ type: str + returned: always + sample: null + state: + description: + - The state of the managed instance. + type: str + returned: always + sample: Ready + storage_account_type: + description: + - The storage account type used to store backups for this instance. + type: str + returned: always + sample: GRS + storage_size_in_gb: + description: + - Storage size in GB. Minimum value 32. Maximum value 8192. + type: int + returned: always + sample: 256 + subnet_id: + description: + - Subnet resource ID for the managed instance. + type: str + returned: always + sample: /subscriptions/xxx-xxxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet-smi/subnets/sqi_sub + timezone_id: + description: + - Id of the timezone. Allowed values are timezones supported by Windows. + type: str + returned: always + sample: UTC + type: + description: + - The SQL managed instance type. + type: str + returned: always + sample: "Microsoft.Sql/managedInstances" + v_cores: + description: + - The number of vCores. Allowed values 8, 16, 24, 32, 40, 64, 80. + type: int + returned: always + sample: 8 + zone_redundant: + description: + - Whether or not the multi-az is enabled. 
+ type: bool + returned: always + sample: false +''' + +# from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + pass + + +sku_spec = dict( + name=dict(type='str'), + tier=dict(type='str'), + size=dict(type='str'), + family=dict(type='str'), + capacity=dict(type='str') +) + + +identity_spec = dict( + user_assigned_identities=dict(type='str'), + principal_id=dict(type='str'), + type=dict(type='str'), + tenant_id=dict(type='str') +) + + +# class AzureRMSqlManagedInstance(AzureRMModuleBase): +class AzureRMSqlManagedInstance(AzureRMModuleBaseExt): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + subnet_id=dict( + type='str' + ), + identity=dict( + type='dict', + options=identity_spec + ), + sku=dict( + type='dict', + options=sku_spec + ), + managed_instance_create_mode=dict( + type='str' + ), + administrator_login=dict( + type='str', + ), + administrator_login_password=dict( + type='str', + no_log=True, + ), + license_type=dict( + type='str', + choices=['LicenseIncluded', 'BasePrice'] + ), + v_cores=dict( + type='int', + choices=[8, 16, 24, 32, 40, 64, 80] + ), + storage_size_in_gb=dict( + type='int' + ), + collation=dict( + type='str' + ), + dns_zone=dict( + type='str' + ), + dns_zone_partner=dict( + type='str' + ), + public_data_endpoint_enabled=dict( + type='bool' + ), + source_managed_instance_id=dict( + type='str' + ), + restore_point_in_time=dict( + type='str' + ), + proxy_override=dict( + type='str', + choices=['Proxy', 'Redirect', 'Default'] + ), + timezone_id=dict( + type='str' + ), + 
instance_pool_id=dict( + type='str' + ), + maintenance_configuration_id=dict( + type='str' + ), + private_endpoint_connections=dict( + type='list', + elements='str' + ), + minimal_tls_version=dict( + type='str', + choices=['None', '1.0', '1.1', '1.2'] + ), + storage_account_type=dict( + type='str' + ), + zone_redundant=dict( + type='bool' + ), + primary_user_assigned_identity_id=dict( + type='str' + ), + key_id=dict( + type='str' + ), + administrators=dict( + type='str' + ), + state=dict( + type='str', + choices=['present', 'absent'], + default='present' + ) + ) + # store the results of the module operation + self.results = dict(changed=False) + self.resource_group = None + self.name = None + self.location = None + self.state = None + self.body = dict() + + super(AzureRMSqlManagedInstance, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + if not self.location: + resource_group = self.get_resource_group(self.resource_group) + self.location = resource_group.location + self.body['location'] = self.location + + sql_managed_instance = self.get() + changed = False + if self.state == 'present': + if sql_managed_instance: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, sql_managed_instance, '', self.results): + changed = True + + if changed: + if not self.check_mode: + # sql_managed_instance = self.update_sql_managed_instance(self.body) + sql_managed_instance = self.create_or_update(self.body) + else: + changed = True + if not self.check_mode: + sql_managed_instance = self.create_or_update(self.body) + + 
else: + changed = True + if not self.check_mode: + sql_managed_instance = self.delete_sql_managed_instance() + + self.results['changed'] = changed + self.results['state'] = sql_managed_instance + return self.results + + def get(self): + try: + response = self.sql_client.managed_instances.get(self.resource_group, self.name) + return self.to_dict(response) + except ResourceNotFoundError: + pass + + def update_sql_managed_instance(self, parameters): + try: + response = self.sql_client.managed_instances.begin_update(resource_group_name=self.resource_group, + managed_instance_name=self.name, + parameters=parameters) + try: + response = self.sql_client.managed_instances.get(resource_group_name=self.resource_group, + managed_instance_name=self.name) + except ResourceNotFoundError: + self.fail("The resource created failed, can't get the facts") + return self.to_dict(response) + except Exception as exc: + self.fail('Error when updating SQL managed instance {0}: {1}'.format(self.name, exc.message)) + + def create_or_update(self, parameters): + try: + response = self.sql_client.managed_instances.begin_create_or_update(resource_group_name=self.resource_group, + managed_instance_name=self.name, + parameters=parameters) + try: + response = self.sql_client.managed_instances.get(resource_group_name=self.resource_group, + managed_instance_name=self.name) + except ResourceNotFoundError: + self.fail("The resource created failed, can't get the facts") + return self.to_dict(response) + except Exception as exc: + self.fail('Error when creating SQL managed instance {0}: {1}'.format(self.name, exc)) + + def delete_sql_managed_instance(self): + try: + response = self.sql_client.managed_instances.begin_delete(self.resource_group, self.name) + except Exception as exc: + self.fail('Error when deleting SQL managed instance {0}: {1}'.format(self.name, exc)) + + def to_dict(self, item): + if not item: + return None + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': 
d.get('id', None), + 'name': d.get('name', None), + 'location': d.get('location', None), + 'type': d.get('type', None), + 'tags': d.get('tags', None), + 'identity': { + 'user_assigned_identities': d.get('identity', {}).get('user_assigned_identities', None), + 'principal_id': d.get('identity', {}).get('principal_id', None), + 'type': d.get('identity', {}).get('type', None), + 'tenant_id': d.get('identity', {}).get('tenant_id', None) + }, + 'sku': { + 'name': d.get('sku', {}).get('name', None), + 'size': d.get('sku', {}).get('size', None), + 'family': d.get('sku', {}).get('family', None), + 'tier': d.get('sku', {}).get('tier', None), + 'capacity': d.get('sku', {}).get('capacity', None) + }, + 'provisioning_state': d.get('provisioning_state', None), + 'managed_instance_create_mode': d.get('managed_instance_create_mode', None), + 'fully_qualified_domain_name': d.get('fully_qualified_domain_name', None), + 'administrator_login': d.get('administrator_login', None), + 'subnet_id': d.get('subnet_id', None), + 'state': d.get('state', None), + 'license_type': d.get('license_type', None), + 'v_cores': d.get('v_cores', None), + 'storage_size_in_gb': d.get('storage_size_in_gb', None), + 'collation': d.get('collation', None), + 'dns_zone': d.get('dns_zone', None), + 'dns_zone_partner': d.get('dns_zone_partner', None), + 'public_data_endpoint_enabled': d.get('public_data_endpoint_enabled', None), + 'source_managed_instance_id': d.get('source_managed_instance_id', None), + 'restore_point_in_time': d.get('restore_point_in_time', None), + 'proxy_override': d.get('proxy_override', None), + 'timezone_id': d.get('timezone_id', None), + 'instance_pool_id': d.get('instance_pool_id', None), + 'maintenance_configuration_id': d.get('maintenance_configuration_id', None), + 'private_endpoint_connections': d.get('private_endpoint_connections', None), + 'minimal_tls_version': d.get('minimal_tls_version', None), + 'storage_account_type': d.get('storage_account_type', None), + 'zone_redundant': 
d.get('zone_redundant', None), + 'primary_user_assigned_identity_id': d.get('primary_user_assigned_identity_id', None), + 'key_id': d.get('key_id', None), + 'administrators': d.get('administrators', None) + } + return d + + +def main(): + AzureRMSqlManagedInstance() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmanagedinstance_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmanagedinstance_info.py new file mode 100644 index 000000000..ad0b11012 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmanagedinstance_info.py @@ -0,0 +1,468 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlmanagedinstance_info +version_added: "0.15.0" +short_description: Get Azure SQL managed instance facts +description: + - Get facts of Azure SQL manged instance facts. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. + type: str + name: + description: + - The name of the SQL managed instance. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) +''' + +EXAMPLES = ''' + - name: Get SQL managed instance by name + azure_rm_sqlmanagedinstance_info: + resource_group: testrg + name: testinstancename + + - name: List SQL managed instance by resource group + azure_rm_sqlmanagedinstance_info: + resource_group: testrg + + - name: List SQL manged instance by subscription and filter by tags + azure_rm_sqlmanagedinstance_info: + tags: + - foo +''' + +RETURN = ''' +sql_managed_instance: + description: + - A list of dictionaries containing facts for SQL Managed Instance. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscription/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/managedInstances/fredsqlinstance" + name: + description: + - SQL manged instance name. + returned: always + type: str + sample: testmanagedinstance + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'taga':'aaa', 'tagb':'bbb' } + identity: + description: + - Azure Active Directory identity configuration for a resource. + returned: always + type: complex + contains: + principal_id: + description: + - The Azure Active Directory principal ID. + type: str + returned: always + sample: 895c-xxx-xxxbe + tenant_id: + description: + - The Azure Active Directory tenant ID. + type: str + returned: always + sample: 72fxxxxx-xxxx-xxxx-xxxx-xxxxxx11db47 + type: + description: + - The identity type. + type: str + returned: always + sample: SystemAssigned + user_assigned_identities: + description: + - The resource ids of the user assigned identities to use. + type: str + returned: always + sample: null + sku: + description: + - An ARM Resource SKU. 
+ returned: always + type: complex + contains: + name: + description: + - The name of the SKU. + returned: always + type: str + sample: BC_Gen4_2 + tier: + description: + - The SKU tier. + returned: always + type: str + sample: BusinessCritical + capacity: + description: + - The SKU capacity. + returned: always + type: int + sample: 2 + family: + description: + - If the service has different generations of hardware, for the same SKU, then that can be captured here. + type: str + returned: always + sample: Gen5 + size: + description: + - Size of the particular SKU. + type: str + returned: always + sample: null + collation: + description: + - The collation of the SQL managed instance. + returned: always + type: str + sample: SQL_Latin1_General_CP1_CI_AS + administrator_login: + description: + - Administrator username for the managed instance. + type: str + returned: always + sample: azureuser + administrators: + description: + - The Azure Active Directory administrator of the server. + type: str + returned: always + sample: null + dns_zone: + description: + -The Dns Zone that the managed instance is in. + type: str + returned: always + sample: 8a23abba54cd + dns_zone_partner: + description: + - The resource id of another managed instance whose DNS zone this managed instance will share after creation. + type: str + returned: always + sample: null + fully_qualified_domain_name: + description: + - The fully qualified domain name of the managed instance. + type: str + returned: always + sample: fredsqlinstance.8a23abba54cd.database.windows.net + instance_pool_id: + description: + - The ID of the instance pool this managed server belongs to. + type: str + returned: always + sample: null + key_id: + description: + - A CMK URI of the key to use for encryption. + type: str + returned: always + sample: null + license_type: + description: + - The license type. 
+ type: str + returned: always + sample: LicenseIncluded + maintenance_configuration_id: + description: + - Specifies maintenance configuration ID to apply to this managed instance. + type: str + returned: always + sample: /subscriptions/xxx-xxxx/providers/Microsoft.Maintenance/publicMaintenanceConfigurations/SQL_Default + managed_instance_create_mode: + description: + - Specifies the mode of database creation. + type: str + returned: always + sample: null + minimal_tls_version: + description: + - Minimal TLS version. Allowed values 'None', '1.0', '1.1', '1.2'. + type: str + returned: always + sample: 1.2 + primary_user_assigned_identity_id: + description: + - The resource ID of a user assigned identity to be used by default. + type: str + returned: always + sample: null + private_endpoint_connections: + description: + - List of private endpoint connections on a managed instance. + type: list + returned: always + sample: [] + provisioning_state: + description: + - The Status of the SQL managed instance. + type: str + returned: always + sample: Successed + proxy_override: + description: + - Connection type used for connecting to the instance. + type: str + returned: always + sample: Proxy + public_data_endpoint_enabled: + description: + - Whether or not the public data endpoint is enabled. + type: bool + returned: always + sample: false + restore_point_in_time: + description: + - Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. + type: str + returned: always + sample: null + source_managed_instance_id: + description: + - The resource identifier of the source managed instance associated with create operation of this instance. + type: str + returned: always + sample: null + state: + description: + - The state of the managed instance. + type: str + returned: always + sample: Ready + storage_account_type: + description: + - The storage account type used to store backups for this instance. 
+ type: str + returned: always + sample: GRS + storage_size_in_gb: + description: + - Storage size in GB. Minimum value 32. Maximum value 8192. + type: int + returned: always + sample: 256 + subnet_id: + description: + - Subnet resource ID for the managed instance. + type: str + returned: always + sample: /subscriptions/xxx-xxxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet-smi/subnets/sqi_sub + timezone_id: + description: + - ID of the timezone. Allowed values are timezones supported by Windows. + type: str + returned: always + sample: UTC + type: + description: + - The SQL managed instance type. + type: str + returned: always + sample: "Microsoft.Sql/managedInstances" + v_cores: + description: + - The number of vCores. Allowed values 8, 16, 24, 32, 40, 64, 80. + type: int + returned: always + sample: 8 + zone_redundant: + description: + - Whether or not the multi-az is enabled. + type: bool + returned: always + sample: false +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSqManagedInstanceInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + ), + name=dict( + type='str', + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.name = None + self.tags = None + super(AzureRMSqManagedInstanceInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None and self.resource_group is not None: + self.results['sql_managed_instance'] = 
self.get() + elif self.resource_group is not None: + self.results['sql_managed_instance'] = self.list_by_resource_group() + else: + self.results['sql_managed_instance'] = self.list_by_subscription() + return self.results + + def get(self): + response = None + results = [] + try: + response = self.sql_client.managed_instances.get(resource_group_name=self.resource_group, + managed_instance_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError: + self.log('Could not get facts for SQL managed instance.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_item(response)) + + return results + + def list_by_resource_group(self): + response = None + results = [] + try: + response = self.sql_client.managed_instances.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except Exception: + self.fail('Could not list facts for SQL managed instance.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def list_by_subscription(self): + response = None + results = [] + try: + response = self.sql_client.managed_instances.list() + self.log("Response : {0}".format(response)) + except Exception: + self.fail('Could not list facts for SQL Managed Instance.') + + if response is not None: + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.format_item(item)) + + return results + + def format_item(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'id': d.get('id', None), + 'name': d.get('name', None), + 'location': d.get('location', None), + 'type': d.get('type', None), + 'tags': d.get('tags', None), + 'identity': { + 'user_assigned_identities': d.get('identity', {}).get('user_assigned_identities', None), + 'principal_id': d.get('identity', {}).get('principal_id', None), + 
'type': d.get('identity', {}).get('type', None), + 'tenant_id': d.get('identity', {}).get('tenant_id', None) + }, + 'sku': { + 'name': d.get('sku', {}).get('name', None), + 'size': d.get('sku', {}).get('size', None), + 'family': d.get('sku', {}).get('family', None), + 'tier': d.get('sku', {}).get('tier', None), + 'capacity': d.get('sku', {}).get('capacity', None) + }, + 'provisioning_state': d.get('provisioning_state', None), + 'managed_instance_create_mode': d.get('managed_instance_create_mode', None), + 'fully_qualified_domain_name': d.get('fully_qualified_domain_name', None), + 'administrator_login': d.get('administrator_login', None), + 'subnet_id': d.get('subnet_id', None), + 'state': d.get('state', None), + 'license_type': d.get('license_type', None), + 'v_cores': d.get('v_cores', None), + 'storage_size_in_gb': d.get('storage_size_in_gb', None), + 'collation': d.get('collation', None), + 'dns_zone': d.get('dns_zone', None), + 'dns_zone_partner': d.get('dns_zone_partner', None), + 'public_data_endpoint_enabled': d.get('public_data_endpoint_enabled', None), + 'source_managed_instance_id': d.get('source_managed_instance_id', None), + 'restore_point_in_time': d.get('restore_point_in_time', None), + 'proxy_override': d.get('proxy_override', None), + 'timezone_id': d.get('timezone_id', None), + 'instance_pool_id': d.get('instance_pool_id', None), + 'maintenance_configuration_id': d.get('maintenance_configuration_id', None), + 'private_endpoint_connections': d.get('private_endpoint_connections', None), + 'minimal_tls_version': d.get('minimal_tls_version', None), + 'storage_account_type': d.get('storage_account_type', None), + 'zone_redundant': d.get('zone_redundant', None), + 'primary_user_assigned_identity_id': d.get('primary_user_assigned_identity_id', None), + 'key_id': d.get('key_id', None), + 'administrators': d.get('administrators', None) + } + return d + + +def main(): + AzureRMSqManagedInstanceInfo() + + +if __name__ == '__main__': + main() diff --git 
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver.py new file mode 100644 index 000000000..f4cf45f08 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver.py @@ -0,0 +1,443 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlserver +version_added: "0.1.2" +short_description: Manage SQL Server instance +description: + - Create, update and delete instance of SQL Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + name: + description: + - The name of the server. + required: True + location: + description: + - Resource location. + admin_username: + description: + - Username of the SQL administrator account for server. Once created it cannot be changed. + admin_password: + description: + - Password of the SQL administrator account for server (required for server creation). + version: + description: + - The version of the server. For example C(12.0). + identity: + description: + - The identity type. Set this to C(SystemAssigned) in order to automatically create and assign an Azure Active Directory principal for the resource. + - Possible values include C(SystemAssigned). + minimal_tls_version: + description: + - Require clients to use a specified TLS version. + type: str + choices: + - '1.0' + - '1.1' + - '1.2' + version_added: "1.11.0" + public_network_access: + description: + - Whether or not public endpoint access is allowed for the server. 
+ type: str + choices: + - Enabled + - Disabled + version_added: "1.11.0" + restrict_outbound_network_access: + description: + - Whether or not to restrict outbound network access for this server. + type: str + choices: + - Enabled + - Disabled + version_added: "1.11.0" + change_admin_password: + description: + - Whether or not the I(admin_password) should be updated for an existing server. If C(true), the password is the only value which will be updated. + type: bool + default: false + version_added: "1.11.0" + administrators: + description: + - The Azure Active Directory identity of the server. + type: dict + suboptions: + administrator_type: + description: + - Type of the Azure AD administrator. + type: str + default: ActiveDirectory + principal_type: + description: + - Principal Type of the Azure AD administrator. + type: str + choices: + - User + - Group + - Application + login: + description: + - Login name of the Azure AD administrator. + type: str + sid: + description: + - SID (object ID) of the Azure AD administrator. + type: str + tenant_id: + description: + - Tenant ID of the Azure AD administrator. + type: str + azure_ad_only_authentication: + description: + - Azure AD only authentication enabled. + type: bool + version_added: "1.11.0" + state: + description: + - State of the SQL server. Use C(present) to create or update a server and use C(absent) to delete a server. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' +- name: Create (or update) SQL Server + azure_rm_sqlserver: + resource_group: myResourceGroup + name: server_name + location: westus + admin_username: mylogin + admin_password: Testpasswordxyz12! + +- name: Change SQL Server admin password + azure_rm_sqlserver: + resource_group: myResourceGroup + name: server_name + location: westus + admin_password: NewPasswordx123! 
+ change_admin_password: true + +- name: Create SQL Server with Azure Active Directory admin + azure_rm_sqlserver: + resource_group: myResourceGroup + name: server_name + location: westus + admin_username: mylogin + admin_password: Testpasswordxyz12! + administrators: + principal_type: Group + login: MySqlAdminGroup + sid: "{{ MySqlAdminGroup.object_id }}" + tenant_id: "{{ my_tenant_id }}" + azure_ad_only_authentication: false +''' + +RETURN = ''' +id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645 +version: + description: + - The version of the server. + returned: always + type: str + sample: 12.0 +state: + description: + - The state of the server. + returned: always + type: str + sample: state +fully_qualified_domain_name: + description: + - The fully qualified domain name of the server. + returned: always + type: str + sample: sqlcrudtest-4645.database.windows.net +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +administrators_spec = dict( + administrator_type=dict(type='str', default='ActiveDirectory'), + principal_type=dict(type='str', choices=['User', 'Group', 'Application']), + login=dict(type='str'), + sid=dict(type='str'), + tenant_id=dict(type='str'), + azure_ad_only_authentication=dict(type='bool'), +) + + +class AzureRMSqlServer(AzureRMModuleBaseExt): + """Configuration class for an Azure RM SQL Server resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), 
+ location=dict( + type='str' + ), + admin_username=dict( + type='str' + ), + admin_password=dict( + type='str', + no_log=True + ), + version=dict( + type='str' + ), + identity=dict( + type='str' + ), + minimal_tls_version=dict( + type="str", + choices=["1.0", "1.1", "1.2"] + ), + public_network_access=dict( + type="str", + choices=["Enabled", "Disabled"] + ), + restrict_outbound_network_access=dict( + type="str", + choices=["Enabled", "Disabled"] + ), + change_admin_password=dict( + type="bool", + default=False, + no_log=False, + ), + administrators=dict( + type='dict', + options=administrators_spec, + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.parameters = dict() + self.tags = None + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + self.change_admin_password = False + + super(AzureRMSqlServer, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "admin_username": + self.parameters.update({"administrator_login": kwargs[key]}) + elif key == "admin_password": + self.parameters.update({"administrator_login_password": kwargs[key]}) + elif key == "identity": + self.parameters.update({"identity": {"type": kwargs[key]}}) + else: + self.parameters[key] = kwargs[key] + + old_response = None + response = None + + resource_group = self.get_resource_group(self.resource_group) + + if "location" not in self.parameters: + self.parameters["location"] = resource_group.location + + old_response = self.get_sqlserver() + + if not old_response: + self.log("SQL Server instance doesn't exist") + if self.state == 'absent': + self.log("Old instance didn't 
exist") + else: + self.to_do = Actions.Create + else: + self.log("SQL Server instance already exists") + if self.state == 'absent': + self.to_do = Actions.Delete + elif self.state == 'present': + self.log("Need to check if SQL Server instance has to be deleted or may be updated") + update_tags, newtags = self.update_tags(old_response.get('tags', dict())) + if update_tags: + self.tags = newtags + admin_pass = self.parameters.pop('administrator_login_password', None) # remove for comparison as value not returned in old_response + if self.change_admin_password: + self.parameters.update(old_response) # use all existing config + self.parameters.update({"administrator_login_password": admin_pass}) + self.results['compare'] = [] + if not self.idempotency_check(old_response, self.parameters): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.log("Need to Create / Update the SQL Server instance") + + if self.check_mode: + self.results['changed'] = True + return self.results + + self.parameters['tags'] = self.tags + response = self.create_update_sqlserver() + response.pop('administrator_login_password', None) + + if not old_response: + self.results['changed'] = True + else: + self.results['changed'] = True if self.change_admin_password else old_response.__ne__(response) + self.log("Creation / Update done") + elif self.to_do == Actions.Delete: + self.log("SQL Server instance deleted") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_sqlserver() + # make sure instance is actually deleted, for some Azure resources, instance is hanging around + # for some time after deletion -- this should be really fixed in Azure + while self.get_sqlserver(): + time.sleep(20) + else: + self.log("SQL Server instance unchanged") + self.results['changed'] = False + response = old_response + + if response: + self.results.update(self.format_results(response)) + + return self.results + + def 
create_update_sqlserver(self): + ''' + Creates or updates SQL Server with the specified configuration. + + :return: deserialized SQL Server instance state dictionary + ''' + self.log("Creating / Updating the SQL Server instance {0}".format(self.name)) + + try: + response = self.sql_client.servers.begin_create_or_update(resource_group_name=self.resource_group, + server_name=self.name, + parameters=self.parameters) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the SQL Server instance.') + self.fail("Error creating the SQL Server instance: {0}".format(str(exc))) + return response.as_dict() + + def delete_sqlserver(self): + ''' + Deletes specified SQL Server instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the SQL Server instance {0}".format(self.name)) + try: + response = self.sql_client.servers.begin_delete(resource_group_name=self.resource_group, + server_name=self.name) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as e: + self.log('Error attempting to delete the SQL Server instance.') + self.fail("Error deleting the SQL Server instance: {0}".format(str(e))) + + return True + + def get_sqlserver(self): + ''' + Gets the properties of the specified SQL Server. 
+ + :return: deserialized SQL Server instance state dictionary + ''' + self.log("Checking if the SQL Server instance {0} is present".format(self.name)) + found = False + try: + response = self.sql_client.servers.get(resource_group_name=self.resource_group, + server_name=self.name) + found = True + self.log("Response : {0}".format(response)) + self.log("SQL Server instance : {0} found".format(response.name)) + except ResourceNotFoundError: + self.log('Did not find the SQL Server instance.') + if found is True: + return response.as_dict() + + return False + + def format_results(self, response): + return { + "id": response.get("id"), + "version": response.get("version"), + "state": response.get("state"), + "fully_qualified_domain_name": response.get("fully_qualified_domain_name"), + } + + +def main(): + """Main execution""" + AzureRMSqlServer() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver_info.py new file mode 100644 index 000000000..06e34ea22 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlserver_info.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Zim Kalinowski, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_sqlserver_info +version_added: "0.1.2" +short_description: Get SQL Server facts +description: + - Get facts of SQL Server. + +options: + resource_group: + description: + - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal. + required: True + server_name: + description: + - The name of the server. 
+ +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get instance of SQL Server + azure_rm_sqlserver_info: + resource_group: myResourceGroup + server_name: server_name + + - name: List instances of SQL Server + azure_rm_sqlserver_info: + resource_group: myResourceGroup +''' + +RETURN = ''' +servers: + description: + - A list of dict results where the key is the name of the SQL Server and the values are the facts for that SQL Server. + returned: always + type: complex + contains: + sqlserver_name: + description: + - The key is the name of the server that the values relate to. + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645 + name: + description: + - Resource name. + returned: always + type: str + sample: sqlcrudtest-4645 + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Sql/servers + location: + description: + - Resource location. + returned: always + type: str + sample: japaneast + kind: + description: + - Kind of sql server. This is metadata used for the Azure portal experience. + returned: always + type: str + sample: v12.0 + version: + description: + - The version of the server. + returned: always + type: str + sample: 12.0 + state: + description: + - The state of the server. + returned: always + type: str + sample: Ready + fully_qualified_domain_name: + description: + - The fully qualified domain name of the server. + returned: always + type: str + sample: fully_qualified_domain_name + minimal_tls_version: + description: + - The version TLS clients at which must connect. 
+ returned: always + type: str + sample: 1.2 + version_added: "1.11.0" + public_network_access: + description: + - Whether or not public endpoint access is allowed for the server. + returned: always + type: str + sample: Enabled + version_added: "1.11.0" + restrict_outbound_network_access: + description: + - Whether or not outbound network access is allowed for this server. + returned: always + type: str + sample: Enabled + version_added: "1.11.0" + admin_username: + description: + - Username of the SQL administrator account for server. + returned: always + type: str + sample: sqladmin + version_added: "1.11.0" + administrators: + description: + - The Azure Active Directory identity of the server. + returned: always + type: dict + version_added: "1.11.0" + contains: + administrator_type: + description: + - Type of the Azure AD administrator. + type: str + sample: ActiveDirectory + azure_ad_only_authentication: + description: + - Azure AD only authentication enabled. + type: bool + sample: False + login: + description: + - Login name of the Azure AD administrator. + type: str + sample: MyAzureAdGroup + principal_type: + description: + - Principal Type of the Azure AD administrator. + type: str + sample: Group + sid: + description: + - SID (object ID) of the Azure AD administrator. + type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + tenant_id: + description: + - Tenant ID of the Azure AD administrator. 
+ type: str + sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSqlServerInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + server_name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False, + ) + self.resource_group = None + self.server_name = None + super(AzureRMSqlServerInfo, self).__init__(self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_sqlserver_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_sqlserver_facts' module has been renamed to 'azure_rm_sqlserver_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and + self.server_name is not None): + self.results['servers'] = self.get() + elif (self.resource_group is not None): + self.results['servers'] = self.list_by_resource_group() + return self.results + + def get(self): + ''' + Gets facts of the specified SQL Server. + + :return: deserialized SQL Serverinstance state dictionary + ''' + response = None + results = {} + try: + response = self.sql_client.servers.get(resource_group_name=self.resource_group, + server_name=self.server_name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError: + self.log('Could not get facts for Servers.') + + if response is not None: + results[response.name] = self.format_results(response.as_dict()) + + return results + + def list_by_resource_group(self): + ''' + Gets facts of the specified SQL Server. 
+ + :return: deserialized SQL Serverinstance state dictionary + ''' + response = None + results = {} + try: + response = self.sql_client.servers.list_by_resource_group(resource_group_name=self.resource_group) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError: + self.log('Could not get facts for Servers.') + + if response is not None: + for item in response: + results[item.name] = self.format_results(item.as_dict()) + + return results + + def format_results(self, response): + administrators = response.get("administrators") + return { + "id": response.get("id"), + "name": response.get("name"), + "type": response.get("type"), + "location": response.get("location"), + "kind": response.get("kind"), + "version": response.get("version"), + "state": response.get("state"), + "tags": response.get("tags", {}), + "fully_qualified_domain_name": response.get("fully_qualified_domain_name"), + "minimal_tls_version": response.get("minimal_tls_version"), + "public_network_access": response.get("public_network_access"), + "restrict_outbound_network_access": response.get("restrict_outbound_network_access"), + "admin_username": response.get("administrator_login"), + "administrators": None if not administrators else { + "administrator_type": administrators.get("administrator_type"), + "azure_ad_only_authentication": administrators.get("azure_ad_only_authentication"), + "login": administrators.get("login"), + "principal_type": administrators.get("principal_type"), + "sid": administrators.get("sid"), + "tenant_id": administrators.get("tenant_id"), + }, + } + + +def main(): + AzureRMSqlServerInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount.py new file mode 100644 index 000000000..d3b5bb807 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount.py 
@@ -0,0 +1,1288 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_storageaccount +version_added: "0.1.0" +short_description: Manage Azure storage accounts +description: + - Create, update or delete a storage account. +options: + resource_group: + description: + - Name of the resource group to use. + required: true + aliases: + - resource_group_name + name: + description: + - Name of the storage account to update or create. + state: + description: + - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account. + - C(failover) is used to failover the storage account to its secondary. This process can take up to a hour. + default: present + choices: + - absent + - present + - failover + location: + description: + - Valid Azure location. Defaults to location of the resource group. + account_type: + description: + - Type of storage account. Required when creating a storage account. + - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types. + - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS). + choices: + - Premium_LRS + - Standard_GRS + - Standard_LRS + - Standard_RAGRS + - Standard_ZRS + - Premium_ZRS + - Standard_RAGZRS + - Standard_GZRS + aliases: + - type + custom_domain: + description: + - User domain assigned to the storage account. + - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source. + - Only one custom domain is supported per storage account at this time. + - To clear the existing custom domain, use an empty string for the custom domain name property. + - Can be added to an existing storage account. 
Will be ignored during storage account creation. + aliases: + - custom_dns_domain_suffix + kind: + description: + - The kind of storage. + - The C(FileStorage) and (BlockBlobStorage) only used when I(account_type=Premium_LRS) or I(account_type=Premium_ZRS). + default: 'Storage' + choices: + - Storage + - StorageV2 + - BlobStorage + - BlockBlobStorage + - FileStorage + is_hns_enabled: + description: + - Account HierarchicalNamespace enabled if sets to true. + - When I(is_hns_enabled=True), I(kind) cannot be C(Storage). + type: bool + access_tier: + description: + - The access tier for this storage account. Required when I(kind=BlobStorage). + choices: + - Hot + - Cool + force_delete_nonempty: + description: + - Attempt deletion if resource already exists and cannot be updated. + type: bool + default: False + aliases: + - force + https_only: + description: + - Allows https traffic only to storage service when set to C(True). + - If omitted, new account creation will default to True, while existing accounts will not be change. + type: bool + minimum_tls_version: + description: + - The minimum required version of Transport Layer Security (TLS) for requests to a storage account. + - If omitted, new account creation will default to null which is currently interpreted to TLS1_0. Existing accounts will not be modified. + choices: + - TLS1_0 + - TLS1_1 + - TLS1_2 + version_added: "1.0.0" + public_network_access: + description: + - Allow or disallow public network access to Storage Account. + choices: + - Enabled + - Disabled + version_added: "1.12.0" + allow_blob_public_access: + description: + - Allows blob containers in account to be set for anonymous public access. + - If set to false, no containers in this account will be able to allow anonymous public access. + - If omitted, new account creation will default to null which is currently interpreted to True. Existing accounts will not be modified. 
+ type: bool + version_added: "1.1.0" + network_acls: + description: + - Manages the Firewall and virtual networks settings of the storage account. + type: dict + suboptions: + default_action: + description: + - Default firewall traffic rule. + - If I(default_action=Allow) no other settings have effect. + choices: + - Allow + - Deny + default: Allow + bypass: + description: + - When I(default_action=Deny) this controls which Azure components can still reach the Storage Account. + - The list is comma separated. + - It can be any combination of the example C(AzureServices), C(Logging), C(Metrics). + - If no Azure components are allowed, explicitly set I(bypass=""). + default: AzureServices + virtual_network_rules: + description: + - A list of subnets and their actions. + suboptions: + id: + description: + - The complete path to the subnet. + action: + description: + - The only logical I(action=Allow) because this setting is only accessible when I(default_action=Deny). + default: 'Allow' + ip_rules: + description: + - A list of IP addresses or ranges in CIDR format. + suboptions: + value: + description: + - The IP address or range. + action: + description: + - The only logical I(action=Allow) because this setting is only accessible when I(default_action=Deny). + default: 'Allow' + blob_cors: + description: + - Specifies CORS rules for the Blob service. + - You can include up to five CorsRule elements in the request. + - If no blob_cors elements are included in the argument list, nothing about CORS will be changed. + - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]). + type: list + elements: dict + suboptions: + allowed_origins: + description: + - A list of origin domains that will be allowed via CORS, or "*" to allow all domains. + type: list + elements: str + required: true + allowed_methods: + description: + - A list of HTTP methods that are allowed to be executed by the origin. 
+ type: list + elements: str + required: true + max_age_in_seconds: + description: + - The number of seconds that the client/browser should cache a preflight response. + type: int + required: true + exposed_headers: + description: + - A list of response headers to expose to CORS clients. + type: list + elements: str + required: true + allowed_headers: + description: + - A list of headers allowed to be part of the cross-origin request. + type: list + elements: str + required: true + static_website: + description: + - Manage static website configuration for the storage account. + type: dict + version_added: "1.13.0" + suboptions: + enabled: + description: + - Indicates whether this account is hosting a static website. + type: bool + default: false + index_document: + description: + - The default name of the index page under each directory. + type: str + error_document404_path: + description: + - The absolute path of the custom 404 page. + type: str + encryption: + description: + - The encryption settings on the storage account. + type: dict + suboptions: + services: + description: + - List of services which support encryption. + type: dict + suboptions: + table: + description: + - The encryption function of the table storage service. + type: dict + suboptions: + enabled: + description: + - Whether to encrypt the table type. + type: bool + queue: + description: + - The encryption function of the queue storage service. + type: dict + suboptions: + enabled: + description: + - Whether to encrypt the queue type. + type: bool + file: + description: + - The encryption function of the file storage service. + type: dict + suboptions: + enabled: + description: + - Whether to encrypt the file type. + type: bool + blob: + description: + - The encryption function of the blob storage service. + type: dict + suboptions: + enabled: + description: + - Whether to encrypt the blob type. + type: bool + key_source: + description: + - The encryption keySource (provider). 
+ type: str + default: Microsoft.Storage + choices: + - Microsoft.Storage + - Microsoft.Keyvault + require_infrastructure_encryption: + description: + - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. + type: bool + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) +''' + +EXAMPLES = ''' + - name: remove account, if it exists + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + state: absent + + - name: create an account + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + type: Standard_RAGRS + tags: + testing: testing + delete: on-exit + + - name: Create an account with kind of FileStorage + azure_rm_storageaccount: + resource_group: myResourceGroup + name: c1h0002 + type: Premium_LRS + kind: FileStorage + tags: + testing: testing + + - name: configure firewall and virtual networks + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh0002 + type: Standard_RAGRS + network_acls: + bypass: AzureServices,Metrics + default_action: Deny + virtual_network_rules: + - id: /subscriptions/mySubscriptionId/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet + action: Allow + ip_rules: + - value: 1.2.3.4 + action: Allow + - value: 123.234.123.0/24 + action: Allow + + - name: create an account with blob CORS + azure_rm_storageaccount: + resource_group: myResourceGroup + name: clh002 + type: Standard_RAGRS + blob_cors: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - x-ms-meta-data* + - x-ms-meta-target* + - x-ms-meta-abc + exposed_headers: + - x-ms-meta-* + max_age_in_seconds: 200 +''' + + +RETURN = ''' +state: + description: + - Current state of the storage account. 
+ returned: always + type: complex + contains: + account_type: + description: + - Type of storage account. + returned: always + type: str + sample: Standard_RAGRS + custom_domain: + description: + - User domain assigned to the storage account. + returned: always + type: complex + contains: + name: + description: + - CNAME source. + returned: always + type: str + sample: testaccount + use_sub_domain: + description: + - Whether to use sub domain. + returned: always + type: bool + sample: true + encryption: + description: + - The encryption settings on the storage account. + type: complex + returned: always + contains: + key_source: + description: + - The encryption keySource (provider). + type: str + returned: always + sample: Microsoft.Storage + require_infrastructure_encryption: + description: + - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. + type: bool + returned: always + sample: false + services: + description: + - List of services which support encryption. + type: dict + returned: always + contains: + file: + description: + - The encryption function of the file storage service. + type: dict + returned: always + sample: {'enabled': true} + table: + description: + - The encryption function of the table storage service. + type: dict + returned: always + sample: {'enabled': true} + queue: + description: + - The encryption function of the queue storage service. + type: dict + returned: always + sample: {'enabled': true} + blob: + description: + - The encryption function of the blob storage service. + type: dict + returned: always + sample: {'enabled': true} + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003" + is_hns_enabled: + description: + - Account HierarchicalNamespace enabled if sets to true. 
+ type: bool + returned: always + sample: true + location: + description: + - Valid Azure location. Defaults to location of the resource group. + returned: always + type: str + sample: eastus2 + name: + description: + - Name of the storage account to update or create. + returned: always + type: str + sample: clh0003 + network_acls: + description: + - A set of firewall and virtual network rules + returned: always + type: dict + sample: { + "bypass": "AzureServices", + "default_action": "Deny", + "virtual_network_rules": [ + { + "action": "Allow", + "id": "/subscriptions/mySubscriptionId/resourceGroups/myResourceGroup/ \ + providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet" + } + ], + "ip_rules": [ + { + "action": "Allow", + "value": "1.2.3.4" + }, + { + "action": "Allow", + "value": "123.234.123.0/24" + } + ] + } + primary_endpoints: + description: + - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location. + returned: always + type: dict + sample: { + "blob": "https://clh0003.blob.core.windows.net/", + "queue": "https://clh0003.queue.core.windows.net/", + "table": "https://clh0003.table.core.windows.net/" + } + primary_location: + description: + - The location of the primary data center for the storage account. + returned: always + type: str + sample: eastus2 + provisioning_state: + description: + - The status of the storage account. + - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded). + returned: always + type: str + sample: Succeeded + failover_in_progress: + description: + - Status indicating the storage account is currently failing over to its secondary location. + returned: always + type: bool + sample: False + resource_group: + description: + - The resource group's name. + returned: always + type: str + sample: Testing + secondary_endpoints: + description: + - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location. 
+ returned: always + type: dict + sample: { + "blob": "https://clh0003-secondary.blob.core.windows.net/", + "queue": "https://clh0003-secondary.queue.core.windows.net/", + "table": "https://clh0003-secondary.table.core.windows.net/" + } + secondary_location: + description: + - The location of the geo-replicated secondary for the storage account. + returned: always + type: str + sample: centralus + status_of_primary: + description: + - The status of the primary location of the storage account; either C(available) or C(unavailable). + returned: always + type: str + sample: available + status_of_secondary: + description: + - The status of the secondary location of the storage account; either C(available) or C(unavailable). + returned: always + type: str + sample: available + https_only: + description: + - Allows https traffic only to storage service when set to C(true). + returned: always + type: bool + sample: false + minimum_tls_version: + description: + - The minimum TLS version permitted on requests to storage. + returned: always + type: str + sample: TLS1_2 + public_network_access: + description: + - Public network access to Storage Account allowed or disallowed. + returned: always + type: str + sample: Enabled + allow_blob_public_access: + description: + - Public access to all blobs or containers in the storage account allowed or disallowed. + returned: always + type: bool + sample: true + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'tags1': 'value1' } + type: + description: + - The storage account type. + returned: always + type: str + sample: "Microsoft.Storage/storageAccounts" + static_website: + description: + - Static website configuration for the storage account. + returned: always + version_added: "1.13.0" + type: complex + contains: + enabled: + description: + - Whether this account is hosting a static website. 
+ returned: always + type: bool + sample: true + index_document: + description: + - The default name of the index page under each directory. + returned: always + type: str + sample: index.html + error_document404_path: + description: + - The absolute path of the custom 404 page. + returned: always + type: str + sample: error.html +''' + + +import copy +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase +from ansible.module_utils._text import to_native + +cors_rule_spec = dict( + allowed_origins=dict(type='list', elements='str', required=True), + allowed_methods=dict(type='list', elements='str', required=True), + max_age_in_seconds=dict(type='int', required=True), + exposed_headers=dict(type='list', elements='str', required=True), + allowed_headers=dict(type='list', elements='str', required=True), +) + +static_website_spec = dict( + enabled=dict(type='bool', default=False), + index_document=dict(type='str'), + error_document404_path=dict(type='str'), +) + + +file_spec = dict( + enabled=dict(type='bool') +) + + +queue_spec = dict( + enabled=dict(type='bool') +) + + +table_spec = dict( + enabled=dict(type='bool') +) + + +blob_spec = dict( + enabled=dict(type='bool') +) + + +def compare_cors(cors1, cors2): + if len(cors1) != len(cors2): + return False + copy2 = copy.copy(cors2) + for rule1 in cors1: + matched = False + for rule2 in copy2: + if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds'] + and set(rule1['allowed_methods']) == set(rule2['allowed_methods']) + and set(rule1['allowed_origins']) == set(rule2['allowed_origins']) + and set(rule1['allowed_headers']) == set(rule2['allowed_headers']) + and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])): + matched = True + copy2.remove(rule2) + if not matched: + return False + return True + + +class AzureRMStorageAccount(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + 
account_type=dict(type='str', + choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS', + 'Standard_RAGZRS', 'Standard_GZRS'], + aliases=['type']), + custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']), + location=dict(type='str'), + name=dict(type='str', required=True), + resource_group=dict(required=True, type='str', aliases=['resource_group_name']), + state=dict(default='present', choices=['present', 'absent', 'failover']), + force_delete_nonempty=dict(type='bool', default=False, aliases=['force']), + tags=dict(type='dict'), + kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage', 'FileStorage', 'BlockBlobStorage']), + access_tier=dict(type='str', choices=['Hot', 'Cool']), + https_only=dict(type='bool'), + minimum_tls_version=dict(type='str', choices=['TLS1_0', 'TLS1_1', 'TLS1_2']), + public_network_access=dict(type='str', choices=['Enabled', 'Disabled']), + allow_blob_public_access=dict(type='bool'), + network_acls=dict(type='dict'), + blob_cors=dict(type='list', options=cors_rule_spec, elements='dict'), + static_website=dict(type='dict', options=static_website_spec), + is_hns_enabled=dict(type='bool'), + encryption=dict( + type='dict', + options=dict( + services=dict( + type='dict', + options=dict( + blob=dict( + type='dict', + options=blob_spec + ), + table=dict( + type='dict', + options=table_spec + ), + queue=dict( + type='dict', + options=queue_spec + ), + file=dict( + type='dict', + options=file_spec + ) + ) + ), + require_infrastructure_encryption=dict(type='bool'), + key_source=dict(type='str', choices=["Microsoft.Storage", "Microsoft.Keyvault"], default='Microsoft.Storage') + ) + ) + ) + + self.results = dict( + changed=False, + state=dict() + ) + + self.account_dict = None + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.account_type = None + self.custom_domain = None + self.tags = None + 
self.force_delete_nonempty = None + self.kind = None + self.access_tier = None + self.https_only = None + self.minimum_tls_version = None + self.public_network_access = None + self.allow_blob_public_access = None + self.network_acls = None + self.blob_cors = None + self.static_website = None + self.encryption = None + self.is_hns_enabled = None + + super(AzureRMStorageAccount, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if len(self.name) < 3 or len(self.name) > 24: + self.fail("Parameter error: name length must be between 3 and 24 characters.") + + if self.custom_domain: + if self.custom_domain.get('name', None) is None: + self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.") + if self.custom_domain.get('use_sub_domain', None) is None: + self.fail("Parameter error: expecting custom_domain to have a use_sub_domain " + "attribute of type boolean.") + + if self.kind in ['FileStorage', 'BlockBlobStorage', ] and self.account_type not in ['Premium_LRS', 'Premium_ZRS']: + self.fail("Parameter error: Storage account with {0} kind require account type is Premium_LRS or Premium_ZRS".format(self.kind)) + self.account_dict = self.get_account() + + if self.state == 'present' and self.account_dict and \ + self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE: + self.fail("Error: storage account {0} has not completed provisioning. State is {1}. 
Expecting state " + "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE)) + + if self.account_dict is not None: + self.results['state'] = self.account_dict + else: + self.results['state'] = dict() + + if self.state == 'present': + if not self.account_dict: + self.results['state'] = self.create_account() + else: + self.update_account() + elif self.state == 'absent' and self.account_dict: + self.delete_account() + self.results['state'] = dict(Status='Deleted') + elif self.state == 'failover' and self.account_dict: + self.failover_account() + self.results['state'] = self.get_account() + + return self.results + + def check_name_availability(self): + self.log('Checking name availability for {0}'.format(self.name)) + try: + account_name = self.storage_models.StorageAccountCheckNameAvailabilityParameters(name=self.name) + self.storage_client.storage_accounts.check_name_availability(account_name) + except Exception as e: + self.log('Error attempting to validate name.') + self.fail("Error checking name availability: {0}".format(str(e))) + + def get_account(self): + self.log('Get properties for account {0}'.format(self.name)) + account_obj = None + blob_mgmt_props = None + blob_client_props = None + account_dict = None + + try: + account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) + blob_mgmt_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name) + if self.kind != "FileStorage": + blob_client_props = self.get_blob_service_client(self.resource_group, self.name).get_service_properties() + except Exception: + pass + + if account_obj: + account_dict = self.account_obj_to_dict(account_obj, blob_mgmt_props, blob_client_props) + + return account_dict + + def account_obj_to_dict(self, account_obj, blob_mgmt_props=None, blob_client_props=None): + account_dict = dict( + id=account_obj.id, + name=account_obj.name, + location=account_obj.location, + 
failover_in_progress=(account_obj.failover_in_progress + if account_obj.failover_in_progress is not None else False), + resource_group=self.resource_group, + type=account_obj.type, + access_tier=account_obj.access_tier, + sku_tier=account_obj.sku.tier, + sku_name=account_obj.sku.name, + provisioning_state=account_obj.provisioning_state, + secondary_location=account_obj.secondary_location, + status_of_primary=account_obj.status_of_primary, + status_of_secondary=account_obj.status_of_secondary, + primary_location=account_obj.primary_location, + https_only=account_obj.enable_https_traffic_only, + minimum_tls_version=account_obj.minimum_tls_version, + public_network_access=account_obj.public_network_access, + allow_blob_public_access=account_obj.allow_blob_public_access, + network_acls=account_obj.network_rule_set, + is_hns_enabled=account_obj.is_hns_enabled if account_obj.is_hns_enabled else False, + static_website=dict( + enabled=False, + index_document=None, + error_document404_path=None, + ), + ) + account_dict['custom_domain'] = None + if account_obj.custom_domain: + account_dict['custom_domain'] = dict( + name=account_obj.custom_domain.name, + use_sub_domain=account_obj.custom_domain.use_sub_domain + ) + + account_dict['primary_endpoints'] = None + if account_obj.primary_endpoints: + account_dict['primary_endpoints'] = dict( + blob=account_obj.primary_endpoints.blob, + queue=account_obj.primary_endpoints.queue, + table=account_obj.primary_endpoints.table + ) + account_dict['secondary_endpoints'] = None + if account_obj.secondary_endpoints: + account_dict['secondary_endpoints'] = dict( + blob=account_obj.secondary_endpoints.blob, + queue=account_obj.secondary_endpoints.queue, + table=account_obj.secondary_endpoints.table + ) + account_dict['tags'] = None + if account_obj.tags: + account_dict['tags'] = account_obj.tags + if blob_mgmt_props and blob_mgmt_props.cors and blob_mgmt_props.cors.cors_rules: + account_dict['blob_cors'] = [dict( + 
allowed_origins=[to_native(y) for y in x.allowed_origins], + allowed_methods=[to_native(y) for y in x.allowed_methods], + max_age_in_seconds=x.max_age_in_seconds, + exposed_headers=[to_native(y) for y in x.exposed_headers], + allowed_headers=[to_native(y) for y in x.allowed_headers] + ) for x in blob_mgmt_props.cors.cors_rules] + + if blob_client_props and blob_client_props['static_website']: + static_website = blob_client_props['static_website'] + account_dict['static_website'] = dict( + enabled=static_website.enabled, + index_document=static_website.index_document, + error_document404_path=static_website.error_document404_path, + ) + + account_dict['network_acls'] = None + if account_obj.network_rule_set: + account_dict['network_acls'] = dict( + bypass=account_obj.network_rule_set.bypass, + default_action=account_obj.network_rule_set.default_action + ) + account_dict['network_acls']['virtual_network_rules'] = [] + if account_obj.network_rule_set.virtual_network_rules: + for rule in account_obj.network_rule_set.virtual_network_rules: + account_dict['network_acls']['virtual_network_rules'].append(dict(id=rule.virtual_network_resource_id, action=rule.action)) + + account_dict['network_acls']['ip_rules'] = [] + if account_obj.network_rule_set.ip_rules: + for rule in account_obj.network_rule_set.ip_rules: + account_dict['network_acls']['ip_rules'].append(dict(value=rule.ip_address_or_range, action=rule.action)) + account_dict['encryption'] = dict() + if account_obj.encryption: + account_dict['encryption']['require_infrastructure_encryption'] = account_obj.encryption.require_infrastructure_encryption + account_dict['encryption']['key_source'] = account_obj.encryption.key_source + if account_obj.encryption.services: + account_dict['encryption']['services'] = dict() + if account_obj.encryption.services.file: + account_dict['encryption']['services']['file'] = dict(enabled=True) + if account_obj.encryption.services.table: + account_dict['encryption']['services']['table'] = 
dict(enabled=True) + if account_obj.encryption.services.queue: + account_dict['encryption']['services']['queue'] = dict(enabled=True) + if account_obj.encryption.services.blob: + account_dict['encryption']['services']['blob'] = dict(enabled=True) + + return account_dict + + def failover_account(self): + + if str(self.account_dict['sku_name']) not in ["Standard_GZRS", "Standard_GRS", "Standard_RAGZRS", "Standard_RAGRS"]: + self.fail("Storage account SKU ({0}) does not support failover to a secondary region.".format(self.account_dict['sku_name'])) + try: + account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name, expand='georeplicationstats') + except Exception as exc: + self.fail("Error occured while acquiring geo-replication status. {0}".format(str(exc))) + + if account_obj.failover_in_progress: + self.fail("Storage account is already in process of failing over to secondary region.") + + if not account_obj.geo_replication_stats.can_failover: + self.fail("Storage account is unable to failover. Secondary region has status of {0}".format(account_obj.geo_replication_stats.status)) + + try: + poller = self.storage_client.storage_accounts.begin_failover(self.resource_group, self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error occured while attempting a failover operation. 
{0}".format(str(exc))) + + self.results['changed'] = True + return result + + def update_network_rule_set(self): + if not self.check_mode: + try: + parameters = self.storage_models.StorageAccountUpdateParameters(network_rule_set=self.network_acls) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + def sort_list_of_dicts(self, rule_set, dict_key): + return sorted(rule_set, key=lambda i: i[dict_key]) + + def update_account(self): + self.log('Update storage account {0}'.format(self.name)) + if self.network_acls: + if self.network_acls.get('default_action', 'Allow') != self.account_dict['network_acls']['default_action']: + self.results['changed'] = True + self.account_dict['network_acls']['default_action'] = self.network_acls['default_action'] + self.update_network_rule_set() + + if self.network_acls.get('default_action', 'Allow') == 'Deny': + if self.network_acls['bypass'] != self.account_dict['network_acls']['bypass']: + self.results['changed'] = True + self.account_dict['network_acls']['bypass'] = self.network_acls['bypass'] + self.update_network_rule_set() + + if self.network_acls.get('virtual_network_rules', None) is not None and self.account_dict['network_acls']['virtual_network_rules'] != []: + if self.sort_list_of_dicts(self.network_acls['virtual_network_rules'], 'id') != \ + self.sort_list_of_dicts(self.account_dict['network_acls']['virtual_network_rules'], 'id'): + self.results['changed'] = True + self.account_dict['network_acls']['virtual_network_rules'] = self.network_acls['virtual_network_rules'] + self.update_network_rule_set() + if self.network_acls.get('virtual_network_rules', None) is not None and self.account_dict['network_acls']['virtual_network_rules'] == []: + self.results['changed'] = True + self.update_network_rule_set() + + if self.network_acls.get('ip_rules', None) is not None and 
self.account_dict['network_acls']['ip_rules'] != []: + if self.sort_list_of_dicts(self.network_acls['ip_rules'], 'value') != \ + self.sort_list_of_dicts(self.account_dict['network_acls']['ip_rules'], 'value'): + self.results['changed'] = True + self.account_dict['network_acls']['ip_rules'] = self.network_acls['ip_rules'] + self.update_network_rule_set() + if self.network_acls.get('ip_rules', None) is not None and self.account_dict['network_acls']['ip_rules'] == []: + self.results['changed'] = True + self.update_network_rule_set() + + if self.is_hns_enabled is not None and bool(self.is_hns_enabled) != bool(self.account_dict.get('is_hns_enabled')): + self.results['changed'] = True + self.account_dict['is_hns_enabled'] = self.is_hns_enabled + if not self.check_mode: + self.fail("The is_hns_enabled parameter not support to update, from {0} to {1}". + format(bool(self.account_dict.get('is_hns_enabled')), self.is_hns_enabled)) + + if self.https_only is not None and bool(self.https_only) != bool(self.account_dict.get('https_only')): + self.results['changed'] = True + self.account_dict['https_only'] = self.https_only + if not self.check_mode: + try: + parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update https only: {0}".format(str(exc))) + + if self.minimum_tls_version is not None and self.minimum_tls_version != self.account_dict.get('minimum_tls_version'): + self.results['changed'] = True + self.account_dict['minimum_tls_version'] = self.minimum_tls_version + if not self.check_mode: + try: + parameters = self.storage_models.StorageAccountUpdateParameters(minimum_tls_version=self.minimum_tls_version) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update minimum tls: 
{0}".format(str(exc))) + + if self.public_network_access is not None and self.public_network_access != self.account_dict.get('public_network_access'): + self.results['changed'] = True + self.account_dict['public_network_access'] = self.public_network_access + if not self.check_mode: + try: + parameters = self.storage_models.StorageAccountUpdateParameters(public_network_access=self.public_network_access) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update public network access: {0}".format(str(exc))) + + if self.allow_blob_public_access is not None and self.allow_blob_public_access != self.account_dict.get('allow_blob_public_access'): + self.results['changed'] = True + self.account_dict['allow_blob_public_access'] = self.allow_blob_public_access + if not self.check_mode: + try: + parameters = self.storage_models.StorageAccountUpdateParameters(allow_blob_public_access=self.allow_blob_public_access) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update allow public blob access: {0}".format(str(exc))) + + if self.account_type: + if self.account_type != self.account_dict['sku_name']: + # change the account type + SkuName = self.storage_models.SkuName + if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]: + self.fail("Storage accounts of type {0} and {1} cannot be changed.".format( + SkuName.premium_lrs, SkuName.standard_zrs)) + if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]: + self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format( + self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs)) + + self.results['changed'] = True + self.account_dict['sku_name'] = self.account_type + + if self.results['changed'] and not self.check_mode: + # Perform the update. 
The API only allows changing one attribute per call. + try: + self.log("sku_name: %s" % self.account_dict['sku_name']) + self.log("sku_tier: %s" % self.account_dict['sku_tier']) + sku = self.storage_models.Sku(name=SkuName(self.account_dict['sku_name'])) + sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier']) + parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + if self.custom_domain: + if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain: + self.results['changed'] = True + self.account_dict['custom_domain'] = self.custom_domain + + if self.results['changed'] and not self.check_mode: + new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'], + use_sub_domain=self.custom_domain['use_sub_domain']) + parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update custom domain: {0}".format(str(exc))) + + if self.access_tier: + if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier: + self.results['changed'] = True + self.account_dict['access_tier'] = self.access_tier + + if self.results['changed'] and not self.check_mode: + parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update access tier: {0}".format(str(exc))) + + update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags']) + if update_tags: + self.results['changed'] = True + if not 
self.check_mode: + parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags']) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update tags: {0}".format(str(exc))) + + if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors): + self.results['changed'] = True + if not self.check_mode: + self.set_blob_cors() + + if self.static_website and self.static_website != self.account_dict.get("static_website", dict()): + self.results['changed'] = True + self.account_dict['static_website'] = self.static_website + self.update_static_website() + + if self.encryption is not None: + encryption_changed = False + if self.encryption.get('require_infrastructure_encryption') and bool(self.encryption.get('require_infrastructure_encryption')) \ + != bool(self.account_dict['encryption']['require_infrastructure_encryption']): + encryption_changed = True + + if self.encryption.get('key_source') != self.account_dict['encryption']['key_source']: + encryption_changed = True + + if self.encryption.get('services') is not None: + if self.encryption.get('queue') is not None and self.account_dict['encryption']['services'].get('queue') is not None: + encryption_changed = True + if self.encryption.get('file') is not None and self.account_dict['encryption']['services'].get('file') is not None: + encryption_changed = True + if self.encryption.get('table') is not None and self.account_dict['encryption']['services'].get('table') is not None: + encryption_changed = True + if self.encryption.get('blob') is not None and self.account_dict['encryption']['services'].get('blob') is not None: + encryption_changed = True + + if encryption_changed and not self.check_mode: + self.fail("The encryption can't update encryption, encryption info as {0}".format(self.account_dict['encryption'])) + + def create_account(self): + self.log("Creating account 
{0}".format(self.name)) + + if not self.location: + self.fail('Parameter error: location required when creating a storage account.') + + if not self.account_type: + self.fail('Parameter error: account_type required when creating a storage account.') + + if not self.access_tier and self.kind == 'BlobStorage': + self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.') + + self.check_name_availability() + self.results['changed'] = True + + if self.check_mode: + account_dict = dict( + location=self.location, + account_type=self.account_type, + name=self.name, + resource_group=self.resource_group, + enable_https_traffic_only=self.https_only, + minimum_tls_version=self.minimum_tls_version, + public_network_access=self.public_network_access, + allow_blob_public_access=self.allow_blob_public_access, + encryption=self.encryption, + is_hns_enabled=self.is_hns_enabled, + tags=dict() + ) + if self.tags: + account_dict['tags'] = self.tags + if self.network_acls: + account_dict['network_acls'] = self.network_acls + if self.blob_cors: + account_dict['blob_cors'] = self.blob_cors + if self.static_website: + account_dict['static_website'] = self.static_website + return account_dict + sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type)) + sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \ + self.storage_models.SkuTier.premium + # pylint: disable=missing-kwoa + parameters = self.storage_models.StorageAccountCreateParameters(sku=sku, + kind=self.kind, + location=self.location, + tags=self.tags, + enable_https_traffic_only=self.https_only, + minimum_tls_version=self.minimum_tls_version, + public_network_access=self.public_network_access, + allow_blob_public_access=self.allow_blob_public_access, + encryption=self.encryption, + is_hns_enabled=self.is_hns_enabled, + access_tier=self.access_tier) + self.log(str(parameters)) + try: + poller = 
self.storage_client.storage_accounts.begin_create(self.resource_group, self.name, parameters) + self.get_poller_result(poller) + except Exception as e: + self.log('Error creating storage account.') + self.fail("Failed to create account: {0}".format(str(e))) + if self.network_acls: + self.set_network_acls() + if self.blob_cors: + self.set_blob_cors() + if self.static_website: + self.update_static_website() + return self.get_account() + + def delete_account(self): + if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \ + not self.force_delete_nonempty and self.account_has_blob_containers(): + self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.") + + self.log('Delete storage account {0}'.format(self.name)) + self.results['changed'] = True + if not self.check_mode: + try: + status = self.storage_client.storage_accounts.delete(self.resource_group, self.name) + self.log("delete status: ") + self.log(str(status)) + except Exception as e: + self.fail("Failed to delete the account: {0}".format(str(e))) + return True + + def account_has_blob_containers(self): + ''' + If there are blob containers, then there are likely VMs depending on this account and it should + not be deleted. + ''' + if self.kind == "FileStorage": + return False + self.log('Checking for existing blob containers') + blob_service = self.get_blob_service_client(self.resource_group, self.name) + try: + response = blob_service.list_containers() + except Exception: + # No blob storage available? 
+ return False + + if len(list(response)) > 0: + return True + return False + + def set_blob_cors(self): + try: + cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors]) + self.storage_client.blob_services.set_service_properties(self.resource_group, + self.name, + self.storage_models.BlobServiceProperties(cors=cors_rules)) + except Exception as exc: + self.fail("Failed to set CORS rules: {0}".format(str(exc))) + + def update_static_website(self): + if self.kind == "FileStorage": + return + try: + self.get_blob_service_client(self.resource_group, self.name).set_service_properties(static_website=self.static_website) + except Exception as exc: + self.fail("Failed to set static website config: {0}".format(str(exc))) + + def set_network_acls(self): + try: + parameters = self.storage_models.StorageAccountUpdateParameters(network_rule_set=self.network_acls) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + +def main(): + AzureRMStorageAccount() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount_info.py new file mode 100644 index 000000000..2b0c301c3 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccount_info.py @@ -0,0 +1,831 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_storageaccount_info + +version_added: "0.1.2" + +short_description: Get storage account facts + +description: + - Get facts for one 
storage account or all storage accounts within a resource group. + +options: + name: + description: + - Only show results for a specific account. + resource_group: + description: + - Limit results to a resource group. Required when filtering by name. + aliases: + - resource_group_name + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + show_connection_string: + description: + - Show the connection string for each of the storageaccount's endpoints. + - For convenient usage, I(show_connection_string) will also show the access keys for each of the storageaccount's endpoints. + - Note that it will cost a lot of time when list all storageaccount rather than query a single one. + type: bool + show_blob_cors: + description: + - Show the blob CORS settings for each blob related to the storage account. + - Querying all storage accounts will take a long time. + type: bool + show_georeplication_stats: + description: + - Show the Geo Replication Stats for each storage account. + - Using this option on an account that does not support georeplication will cause a delay in getting results. + type: bool + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Get facts for one account + azure_rm_storageaccount_info: + resource_group: myResourceGroup + name: clh0002 + + - name: Get facts for all accounts in a resource group + azure_rm_storageaccount_info: + resource_group: myResourceGroup + + - name: Get facts for all accounts by tags + azure_rm_storageaccount_info: + tags: + - testing + - foo:bar +''' + +RETURN = ''' +azure_storageaccounts: + description: + - List of storage account dicts. 
+ returned: always + type: list + example: [{ + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/myResourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001", + "location": "eastus2", + "name": "testaccount001", + "properties": { + "accountType": "Standard_LRS", + "creationTime": "2016-03-28T02:46:58.290113Z", + "primaryEndpoints": { + "blob": "https://testaccount001.blob.core.windows.net/", + "file": "https://testaccount001.file.core.windows.net/", + "queue": "https://testaccount001.queue.core.windows.net/", + "table": "https://testaccount001.table.core.windows.net/" + }, + "primaryLocation": "eastus2", + "provisioningState": "Succeeded", + "statusOfPrimary": "Available" + }, + "tags": {}, + "type": "Microsoft.Storage/storageAccounts" + }] +storageaccounts: + description: + - List of storage account dicts in resource module's parameter format. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/t + estaccount001" + name: + description: + - Name of the storage account to update or create. + returned: always + type: str + sample: testaccount001 + location: + description: + - Valid Azure location. Defaults to location of the resource group. + returned: always + type: str + sample: eastus + account_type: + description: + - Type of storage account. + - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types. + - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS). + returned: always + type: str + sample: Standard_ZRS + custom_domain: + description: + - User domain assigned to the storage account. + - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source. + returned: always + type: complex + contains: + name: + description: + - CNAME source. 
+ returned: always + type: str + sample: testaccount + use_sub_domain: + description: + - Whether to use sub domain. + returned: always + type: bool + sample: true + encryption: + description: + - The encryption settings on the storage account. + type: complex + returned: always + contains: + key_source: + description: + - The encryption keySource (provider). + type: str + returned: always + sample: Microsoft.Storage + require_infrastructure_encryption: + description: + - A boolean indicating whether or not the service applies a secondary layer of encryption with platform managed keys for data at rest. + type: bool + returned: always + sample: false + services: + description: + - List of services which support encryption. + type: dict + returned: always + contains: + file: + description: + - The encryption function of the file storage service. + type: dict + returned: always + sample: {'enabled': true} + table: + description: + - The encryption function of the table storage service. + type: dict + returned: always + sample: {'enabled': true} + queue: + description: + - The encryption function of the queue storage service. + type: dict + returned: always + sample: {'enabled': true} + blob: + description: + - The encryption function of the blob storage service. + type: dict + returned: always + sample: {'enabled': true} + is_hns_enabled: + description: + - Account HierarchicalNamespace enabled if sets to true. + type: bool + returned: always + sample: true + kind: + description: + - The kind of storage. + returned: always + type: str + sample: Storage + access_tier: + description: + - The access tier for this storage account. + returned: always + type: str + sample: Hot + https_only: + description: + - Allows https traffic only to storage service when set to C(true). + returned: always + type: bool + sample: false + minimum_tls_version: + description: + - The minimum TLS version permitted on requests to storage. 
+ returned: always + type: str + sample: TLS1_2 + public_network_access: + description: + - Public network access to Storage Account allowed or disallowed. + returned: always + type: str + sample: Enabled + allow_blob_public_access: + description: + - Public access to all blobs or containers in the storage account allowed or disallowed. + returned: always + type: bool + sample: true + network_acls: + description: + - A set of firewall and virtual network rules + returned: always + type: dict + sample: { + "bypass": "AzureServices", + "default_action": "Deny", + "virtual_network_rules": [ + { + "action": "Allow", + "id": "/subscriptions/mySubscriptionId/resourceGroups/myResourceGroup/ \ + providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet" + } + ], + "ip_rules": [ + { + "action": "Allow", + "value": "1.2.3.4" + }, + { + "action": "Allow", + "value": "123.234.123.0/24" + } + ] + } + provisioning_state: + description: + - The status of the storage account at the time the operation was called. + - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded). + returned: always + type: str + sample: Succeeded + failover_in_progress: + description: + - Status indicating the storage account is currently failing over to its secondary location. + returned: always + type: bool + sample: False + secondary_location: + description: + - The location of the geo-replicated secondary for the storage account. + - Only available if the I(account_type=Standard_GRS) or I(account_type=Standard_RAGRS). + returned: always + type: str + sample: westus + status_of_primary: + description: + - Status of the primary location of the storage account; either C(available) or C(unavailable). + returned: always + type: str + sample: available + status_of_secondary: + description: + - Status of the secondary location of the storage account; either C(available) or C(unavailable). 
+ returned: always + type: str + sample: available + primary_location: + description: + - The location of the primary data center for the storage account. + returned: always + type: str + sample: eastus + primary_endpoints: + description: + - URLs to retrieve a public I(blob), I(file), I(queue), or I(table) object. + - Note that C(Standard_ZRS) and C(Premium_LRS) accounts only return the blob endpoint. + returned: always + type: complex + contains: + blob: + description: + - The primary blob endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The primary blob endpoint. + returned: always + type: str + sample: "https://testaccount001.blob.core.windows.net/" + connectionstring: + description: + - Connectionstring of the blob endpoint. + returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X" + file: + description: + - The primary file endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The primary file endpoint. + returned: always + type: str + sample: "https://testaccount001.file.core.windows.net/" + connectionstring: + description: + - Connectionstring of the file endpoint. + returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;FileEndpoint=X" + queue: + description: + - The primary queue endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The primary queue endpoint. + returned: always + type: str + sample: "https://testaccount001.queue.core.windows.net/" + connectionstring: + description: + - Connectionstring of the queue endpoint. 
+ returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X" + table: + description: + - The primary table endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The primary table endpoint. + returned: always + type: str + sample: "https://testaccount001.table.core.windows.net/" + connectionstring: + description: + - Connectionstring of the table endpoint. + returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X" + key: + description: + - The account key for the primary_endpoints + returned: always + type: str + sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + secondary_endpoints: + description: + - The URLs to retrieve a public I(blob), I(file), I(queue), or I(table) object from the secondary location. + - Only available if the SKU I(name=Standard_RAGRS). + returned: always + type: complex + contains: + blob: + description: + - The secondary blob endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The secondary blob endpoint. + returned: always + type: str + sample: "https://testaccount001.blob.core.windows.net/" + connectionstring: + description: + - Connectionstring of the blob endpoint. + returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X" + file: + description: + - The secondary file endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The secondary file endpoint. + returned: always + type: str + sample: "https://testaccount001.file.core.windows.net/" + connectionstring: + description: + - Connectionstring of the file endpoint. 
+ returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;FileEndpoint=X" + queue: + description: + - The secondary queue endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The secondary queue endpoint. + returned: always + type: str + sample: "https://testaccount001.queue.core.windows.net/" + connectionstring: + description: + - Connectionstring of the queue endpoint. + returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X" + table: + description: + - The secondary table endpoint and connection string. + returned: always + type: complex + contains: + endpoint: + description: + - The secondary table endpoint. + returned: always + type: str + sample: "https://testaccount001.table.core.windows.net/" + connectionstring: + description: + - Connectionstring of the table endpoint. + returned: always + type: str + sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X" + key: + description: + - The account key for the secondary_endpoints + sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + georeplication_stats: + description: + - Parameters related to the status of geo-replication. + - This will be null on accounts that don't support geo-replication. + returned: always + type: complex + contains: + can_failover: + description: + - Property indicating if fail over is supported by the account. + type: bool + sample: true + last_sync_time: + description: + - Writes to the primary before this time are guaranteed to be replicated to the secondary. + type: str + sample: "2023-04-10T21:22:15+00:00" + sync_status: + description: + - Property showing status of the secondary region. + - Known values are "Live", "Bootstrap", and "Unavailable". 
+ type: str + sample: Live + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "tag1": "abc" } + static_website: + description: + - Static website configuration for the storage account. + returned: always + version_added: "1.13.0" + type: complex + contains: + enabled: + description: + - Whether this account is hosting a static website. + returned: always + type: bool + sample: true + index_document: + description: + - The default name of the index page under each directory. + returned: always + type: str + sample: index.html + error_document404_path: + description: + - The absolute path of the custom 404 page. + returned: always + type: str + sample: error.html +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils._text import to_native + + +AZURE_OBJECT_CLASS = 'StorageAccount' + + +class AzureRMStorageAccountInfo(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str', aliases=['resource_group_name']), + tags=dict(type='list', elements='str'), + show_connection_string=dict(type='bool'), + show_blob_cors=dict(type='bool'), + show_georeplication_stats=dict(type='bool') + ) + + self.results = dict( + changed=False, + storageaccounts=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + self.show_connection_string = None + self.show_blob_cors = None + self.show_georeplication_stats = None + + super(AzureRMStorageAccountInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_storageaccount_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_storageaccount_facts' 
module has been renamed to 'azure_rm_storageaccount_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + results = [] + if self.name: + results = self.get_account() + elif self.resource_group: + results = self.list_resource_group() + else: + results = self.list_all() + + filtered = self.filter_tag(results) + + if is_old_facts: + self.results['ansible_facts'] = { + 'azure_storageaccounts': self.serialize(filtered), + 'storageaccounts': self.format_to_dict(filtered), + } + self.results['storageaccounts'] = self.format_to_dict(filtered) + return self.results + + def get_account(self): + self.log('Get properties for account {0}'.format(self.name)) + account = None + try: + expand = None + if(self.show_georeplication_stats): + expand = 'georeplicationstats' + account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name, expand=expand) + return [account] + except Exception as exc: + + # Several errors are passed as generic HTTP errors. Catch the error and pass back the basic account information + # if the account doesn't support replication or replication stats are not available. + if "InvalidAccountType" in str(exc) or "LastSyncTimeUnavailable" in str(exc): + account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) + return [account] + + if "AuthorizationFailed" in str(exc): + self.fail("Error authenticating with the Azure storage API. 
{0}".format(str(exc))) + + return [] + + def list_resource_group(self): + self.log('List items') + try: + response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc))) + + return response + + def list_all(self): + self.log('List all items') + try: + response = self.storage_client.storage_accounts.list() + except Exception as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + return response + + def filter_tag(self, raw): + return [item for item in raw if self.has_tags(item.tags, self.tags)] + + def serialize(self, raw): + return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raw] + + def format_to_dict(self, raw): + return [self.account_obj_to_dict(item) for item in raw] + + def account_obj_to_dict(self, account_obj): + account_dict = dict( + id=account_obj.id, + name=account_obj.name, + location=account_obj.location, + failover_in_progress=(account_obj.failover_in_progress + if account_obj.failover_in_progress is not None else False), + access_tier=(account_obj.access_tier + if account_obj.access_tier is not None else None), + account_type=account_obj.sku.name, + kind=account_obj.kind if account_obj.kind else None, + provisioning_state=account_obj.provisioning_state, + secondary_location=account_obj.secondary_location, + status_of_primary=(account_obj.status_of_primary + if account_obj.status_of_primary is not None else None), + status_of_secondary=(account_obj.status_of_secondary + if account_obj.status_of_secondary is not None else None), + primary_location=account_obj.primary_location, + https_only=account_obj.enable_https_traffic_only, + minimum_tls_version=account_obj.minimum_tls_version, + public_network_access=account_obj.public_network_access, + allow_blob_public_access=account_obj.allow_blob_public_access, + is_hns_enabled=account_obj.is_hns_enabled if 
account_obj.is_hns_enabled else False, + static_website=dict( + enabled=False, + index_document=None, + error_document404_path=None, + ), + ) + + account_dict['geo_replication_stats'] = None + if account_obj.geo_replication_stats is not None: + account_dict['geo_replication_stats'] = dict( + status=account_obj.geo_replication_stats.status, + can_failover=account_obj.geo_replication_stats.can_failover, + last_sync_time=account_obj.geo_replication_stats.last_sync_time + ) + + id_dict = self.parse_resource_to_dict(account_obj.id) + account_dict['resource_group'] = id_dict.get('resource_group') + account_key = self.get_connectionstring(account_dict['resource_group'], account_dict['name']) + account_dict['custom_domain'] = None + if account_obj.custom_domain: + account_dict['custom_domain'] = dict( + name=account_obj.custom_domain.name, + use_sub_domain=account_obj.custom_domain.use_sub_domain + ) + + account_dict['network_acls'] = None + if account_obj.network_rule_set: + account_dict['network_acls'] = dict( + bypass=account_obj.network_rule_set.bypass, + default_action=account_obj.network_rule_set.default_action, + ip_rules=account_obj.network_rule_set.ip_rules + ) + if account_obj.network_rule_set.virtual_network_rules: + account_dict['network_acls']['virtual_network_rules'] = [] + for rule in account_obj.network_rule_set.virtual_network_rules: + account_dict['network_acls']['virtual_network_rules'].append(dict(id=rule.virtual_network_resource_id, action=rule.action)) + + if account_obj.network_rule_set.ip_rules: + account_dict['network_acls']['ip_rules'] = [] + for rule in account_obj.network_rule_set.ip_rules: + account_dict['network_acls']['ip_rules'].append(dict(value=rule.ip_address_or_range, action=rule.action)) + + account_dict['primary_endpoints'] = None + if account_obj.primary_endpoints: + account_dict['primary_endpoints'] = dict( + blob=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.blob, 'blob'), + 
file=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.file, 'file'), + queue=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.queue, 'queue'), + table=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.table, 'table') + ) + if account_key[0]: + account_dict['primary_endpoints']['key'] = '{0}'.format(account_key[0]) + account_dict['secondary_endpoints'] = None + if account_obj.secondary_endpoints: + account_dict['secondary_endpoints'] = dict( + blob=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.blob, 'blob'), + file=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.file, 'file'), + queue=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.queue, 'queue'), + table=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.table, 'table'), + ) + if account_key[1]: + account_dict['secondary_endpoints']['key'] = '{0}'.format(account_key[1]) + account_dict['tags'] = None + if account_obj.tags: + account_dict['tags'] = account_obj.tags + blob_mgmt_props = self.get_blob_mgmt_props(account_dict['resource_group'], account_dict['name']) + if blob_mgmt_props and blob_mgmt_props.cors and blob_mgmt_props.cors.cors_rules: + account_dict['blob_cors'] = [dict( + allowed_origins=to_native(x.allowed_origins), + allowed_methods=to_native(x.allowed_methods), + max_age_in_seconds=x.max_age_in_seconds, + exposed_headers=to_native(x.exposed_headers), + allowed_headers=to_native(x.allowed_headers) + ) for x in blob_mgmt_props.cors.cors_rules] + blob_client_props = self.get_blob_client_props(account_dict['resource_group'], account_dict['name'], account_dict['kind']) + if blob_client_props and blob_client_props['static_website']: + static_website = blob_client_props['static_website'] + 
account_dict['static_website'] = dict( + enabled=static_website.enabled, + index_document=static_website.index_document, + error_document404_path=static_website.error_document404_path, + ) + + account_dict['encryption'] = dict() + if account_obj.encryption: + account_dict['encryption']['require_infrastructure_encryption'] = account_obj.encryption.require_infrastructure_encryption + account_dict['encryption']['key_source'] = account_obj.encryption.key_source + + if account_obj.encryption.services: + account_dict['encryption']['services'] = dict() + + if account_obj.encryption.services.file: + account_dict['encryption']['services']['file'] = dict(enabled=True) + if account_obj.encryption.services.table: + account_dict['encryption']['services']['table'] = dict(enabled=True) + if account_obj.encryption.services.queue: + account_dict['encryption']['services']['queue'] = dict(enabled=True) + if account_obj.encryption.services.blob: + account_dict['encryption']['services']['blob'] = dict(enabled=True) + return account_dict + + def format_endpoint_dict(self, name, key, endpoint, storagetype, protocol='https'): + result = dict(endpoint=endpoint) + if key: + result['connectionstring'] = 'DefaultEndpointsProtocol={0};EndpointSuffix={1};AccountName={2};AccountKey={3};{4}Endpoint={5}'.format( + protocol, + self._cloud_environment.suffixes.storage_endpoint, + name, + key, + str.title(storagetype), + endpoint) + return result + + def get_blob_mgmt_props(self, resource_group, name): + if not self.show_blob_cors: + return None + try: + return self.storage_client.blob_services.get_service_properties(resource_group, name) + except Exception: + pass + return None + + def get_blob_client_props(self, resource_group, name, kind): + if kind == "FileStorage": + return None + try: + return self.get_blob_service_client(resource_group, name).get_service_properties() + except Exception: + pass + return None + + def get_connectionstring(self, resource_group, name): + keys = ['', ''] + if not 
self.show_connection_string: + return keys + try: + cred = self.storage_client.storage_accounts.list_keys(resource_group, name) + # get the following try catch from CLI + try: + keys = [cred.keys[0].value, cred.keys[1].value] + except AttributeError: + keys = [cred.key1, cred.key2] + except Exception: + pass + return keys + + +def main(): + AzureRMStorageAccountInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageblob.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageblob.py new file mode 100644 index 000000000..0999494be --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageblob.py @@ -0,0 +1,645 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_storageblob +short_description: Manage blob containers and blob objects +version_added: "0.0.1" +description: + - Create, update and delete blob containers and blob objects. + - Use to upload a file and store it as a blob object, or download a blob object to a file(upload and download mode) + - Use to upload a batch of files under a given directory(batch upload mode) + - In the batch upload mode, the existing blob object will be overwritten if a blob object with the same name is to be created. + - the module can work exclusively in three modes, when C(batch_upload_src) is set, it is working in batch upload mode; + when C(src) is set, it is working in upload mode and when C(dst) is set, it is working in dowload mode. +options: + storage_account_name: + description: + - Name of the storage account to use. 
+ required: true + aliases: + - account_name + - storage_account + blob: + description: + - Name of a blob object within the container. + aliases: + - blob_name + blob_type: + description: + - Type of blob object. + default: block + choices: + - block + - page + version_added: "0.0.1" + container: + description: + - Name of a blob container within the storage account. + required: true + aliases: + - container_name + content_type: + description: + - Set the blob content-type header. For example C(image/png). + cache_control: + description: + - Set the blob cache-control header. + content_disposition: + description: + - Set the blob content-disposition header. + content_encoding: + description: + - Set the blob encoding header. + content_language: + description: + - Set the blob content-language header. + content_md5: + description: + - Set the blob md5 hash value. + dest: + description: + - Destination file path. Use with state C(present) to download a blob. + aliases: + - destination + force: + description: + - Overwrite existing blob or file when uploading or downloading. Force deletion of a container that contains blobs. + type: bool + default: no + resource_group: + description: + - Name of the resource group to use. + required: true + aliases: + - resource_group_name + src: + description: + - Source file path. Use with state C(present) to upload a blob. + aliases: + - source + batch_upload_src: + description: + - Batch upload source directory. Use with state C(present) to upload batch of files under the directory. + batch_upload_dst: + description: + - Base directory in container when upload batch of files. + state: + description: + - State of a container or blob. + - Use state C(absent) with a container value only to delete a container. Include a blob value to remove + a specific blob. A container will not be deleted, if it contains blobs. Use the I(force) option to override, + deleting the container and all associated blobs. 
+ - Use state C(present) to create or update a container and upload or download a blob. If the container + does not exist, it will be created. If it exists, it will be updated with configuration options. Provide + a blob name and either src or dest to upload or download. Provide a src path to upload and a dest path + to download. If a blob (uploading) or a file (downloading) already exists, it will not be overwritten + unless I(force=true). + default: present + choices: + - absent + - present + public_access: + description: + - A container's level of public access. By default containers are private. + - Can only be set at time of container creation. + choices: + - container + - blob + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' +- name: Remove container foo + azure_rm_storageblob: + resource_group: myResourceGroup + storage_account_name: clh0002 + container: foo + state: absent + +- name: Create container foo and upload a file + azure_rm_storageblob: + resource_group: myResourceGroup + storage_account_name: clh0002 + container: foo + blob: graylog.png + src: ./files/graylog.png + public_access: container + content_type: 'application/image' + +- name: Download the file + azure_rm_storageblob: + resource_group: myResourceGroup + storage_account_name: clh0002 + container: foo + blob: graylog.png + dest: ~/tmp/images/graylog.png +''' + +RETURN = ''' +blob: + description: + - Facts about the current state of the blob. 
+ returned: when a blob is operated on + type: dict + sample: { + "content_length": 136532, + "content_settings": { + "cache_control": null, + "content_disposition": null, + "content_encoding": null, + "content_language": null, + "content_md5": null, + "content_type": "application/image" + }, + "last_modified": "09-Mar-2016 22:08:25 +0000", + "name": "graylog.png", + "tags": {}, + "type": "BlockBlob" + } +container: + description: + - Facts about the current state of the selected container. + returned: always + type: dict + sample: { + "last_modified": "09-Mar-2016 19:28:26 +0000", + "name": "foo", + "tags": {} + } +''' + +import os +import mimetypes + +try: + from azure.storage.blob._models import BlobType, ContentSettings + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMStorageBlob(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + storage_account_name=dict(required=True, type='str', aliases=['account_name', 'storage_account']), + blob=dict(type='str', aliases=['blob_name']), + blob_type=dict(type='str', default='block', choices=['block', 'page']), + container=dict(required=True, type='str', aliases=['container_name']), + dest=dict(type='path', aliases=['destination']), + force=dict(type='bool', default=False), + resource_group=dict(required=True, type='str', aliases=['resource_group_name']), + src=dict(type='str', aliases=['source']), + batch_upload_src=dict(type='path'), + batch_upload_dst=dict(type='path'), + state=dict(type='str', default='present', choices=['absent', 'present']), + public_access=dict(type='str', choices=['container', 'blob']), + content_type=dict(type='str'), + content_encoding=dict(type='str'), + content_language=dict(type='str'), + content_disposition=dict(type='str'), + cache_control=dict(type='str'), + 
content_md5=dict(type='str'), + ) + + mutually_exclusive = [('src', 'dest'), ('src', 'batch_upload_src'), ('dest', 'batch_upload_src')] + + self.blob_service_client = None + self.blob_details = None + self.storage_account_name = None + self.blob = None + self.blob_obj = None + self.blob_type = None + self.container = None + self.container_obj = None + self.dest = None + self.force = None + self.resource_group = None + self.src = None + self.batch_upload_src = None + self.batch_upload_dst = None + self.state = None + self.tags = None + self.public_access = None + self.results = dict( + changed=False, + actions=[], + container=dict(), + blob=dict() + ) + + super(AzureRMStorageBlob, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + self.results['check_mode'] = self.check_mode + + # add file path validation + + self.blob_service_client = self.get_blob_service_client(self.resource_group, self.storage_account_name) + self.container_obj = self.get_container() + if self.blob: + self.blob_obj = self.get_blob() + + if self.state == 'present': + if not self.container_obj: + # create the container + self.create_container() + elif self.container_obj and not self.blob: + # update container attributes + update_tags, self.container_obj['tags'] = self.update_tags(self.container_obj.get('tags')) + if update_tags: + self.update_container_tags(self.container_obj['tags']) + + if self.batch_upload_src: + self.batch_upload() + return self.results + + if self.blob: + # create, update or download blob + if self.src and self.src_is_valid(): + if self.blob_obj and not self.force: + self.log("Cannot upload to {0}. Blob with that name already exists. 
Use the force option".format(self.blob)) + else: + self.upload_blob() + elif self.dest and self.dest_is_valid(): + self.download_blob() + + update_tags, self.blob_obj['tags'] = self.update_tags(self.blob_obj.get('tags')) + if update_tags: + self.update_blob_tags(self.blob_obj['tags']) + + if self.blob_content_settings_differ(): + self.update_blob_content_settings() + + elif self.state == 'absent': + if self.container_obj and not self.blob: + # Delete container + if self.container_has_blobs(): + if self.force: + self.delete_container() + else: + self.log("Cannot delete container {0}. It contains blobs. Use the force option.".format( + self.container)) + else: + self.delete_container() + elif self.container_obj and self.blob_obj: + # Delete blob + self.delete_blob() + + # until we sort out how we want to do this globally + del self.results['actions'] + return self.results + + def batch_upload(self): + + def _glob_files_locally(folder_path): + + len_folder_path = len(folder_path) + 1 + + for root, v, files in os.walk(folder_path): + for f in files: + full_path = os.path.join(root, f) + yield full_path, full_path[len_folder_path:] + + def _normalize_blob_file_path(path, name): + path_sep = '/' + if path: + name = path_sep.join((path, name)) + + return path_sep.join(os.path.normpath(name).split(os.path.sep)).strip(path_sep) + + def _guess_content_type(file_path, original): + if original.content_encoding or original.content_type: + return original + + mimetypes.add_type('application/json', '.json') + mimetypes.add_type('application/javascript', '.js') + mimetypes.add_type('application/wasm', '.wasm') + + content_type, v = mimetypes.guess_type(file_path) + return ContentSettings(content_type=content_type, + content_disposition=original.content_disposition, + content_language=original.content_language, + content_md5=original.content_md5, + cache_control=original.cache_control) + + if not os.path.exists(self.batch_upload_src): + self.fail("batch upload source source 
directory {0} does not exist".format(self.batch_upload_src)) + + if not os.path.isdir(self.batch_upload_src): + self.fail("incorrect usage: {0} is not a directory".format(self.batch_upload_src)) + + source_dir = os.path.realpath(self.batch_upload_src) + source_files = list(_glob_files_locally(source_dir)) + + content_settings = ContentSettings(content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + cache_control=self.cache_control, + content_md5=None) + + for src, blob_path in source_files: + if self.batch_upload_dst: + blob_path = _normalize_blob_file_path(self.batch_upload_dst, blob_path) + if not self.check_mode: + try: + client = self.blob_service_client.get_blob_client(container=self.container, blob=blob_path) + with open(src, "rb") as data: + client.upload_blob(data=data, + blob_type=self.get_blob_type(self.blob_type), + metadata=self.tags, + content_settings=_guess_content_type(src, content_settings), + overwrite=self.force) + except Exception as exc: + self.fail("Error creating blob {0} - {1}".format(src, str(exc))) + self.results['actions'].append('created blob from {0}'.format(src)) + + self.results['changed'] = True + self.results['container'] = self.container_obj + + def get_blob_type(self, blob_type): + if blob_type == "block": + return BlobType.BlockBlob + elif blob_type == "page": + return BlobType.PageBlob + else: + return BlobType.AppendBlob + + def get_container(self): + result = {} + container = None + if self.container: + try: + container = self.blob_service_client.get_container_client(container=self.container).get_container_properties() + except ResourceNotFoundError: + pass + if container: + result = dict( + name=container["name"], + tags=container["metadata"], + last_modified=container["last_modified"].strftime('%d-%b-%Y %H:%M:%S %z'), + ) + return result + + def get_blob(self): + result = dict() + blob = None + if self.blob: + try: + 
blob = self.blob_service_client.get_blob_client(container=self.container, blob=self.blob).get_blob_properties() + except ResourceNotFoundError: + pass + if blob: + result = dict( + name=blob["name"], + tags=blob["metadata"], + last_modified=blob["last_modified"].strftime('%d-%b-%Y %H:%M:%S %z'), + type=blob["blob_type"], + content_length=blob["size"], + content_settings=dict( + content_type=blob["content_settings"]["content_type"], + content_encoding=blob["content_settings"]["content_encoding"], + content_language=blob["content_settings"]["content_language"], + content_disposition=blob["content_settings"]["content_disposition"], + cache_control=blob["content_settings"]["cache_control"], + content_md5=blob["content_settings"]["content_md5"].hex() if blob["content_settings"]["content_md5"] else None, + ) + ) + return result + + def create_container(self): + self.log('Create container %s' % self.container) + + tags = None + if not self.blob and self.tags: + # when a blob is present, then tags are assigned at the blob level + tags = self.tags + + if not self.check_mode: + try: + client = self.blob_service_client.get_container_client(container=self.container) + client.create_container(metadata=tags, public_access=self.public_access) + except Exception as exc: + self.fail("Error creating container {0} - {1}".format(self.container, str(exc))) + self.container_obj = self.get_container() + self.results['changed'] = True + self.results['actions'].append('created container {0}'.format(self.container)) + self.results['container'] = self.container_obj + + def upload_blob(self): + content_settings = None + if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \ + self.cache_control or self.content_md5: + content_settings = ContentSettings( + content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + 
cache_control=self.cache_control, + content_md5=self.content_md5 + ) + if not self.check_mode: + try: + client = self.blob_service_client.get_blob_client(container=self.container, blob=self.blob) + with open(self.src, "rb") as data: + client.upload_blob(data=data, + blob_type=self.get_blob_type(self.blob_type), + metadata=self.tags, + content_settings=content_settings, + overwrite=self.force) + except Exception as exc: + self.fail("Error creating blob {0} - {1}".format(self.blob, str(exc))) + + self.blob_obj = self.get_blob() + self.results['changed'] = True + self.results['actions'].append('created blob {0} from {1}'.format(self.blob, self.src)) + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + def download_blob(self): + if not self.check_mode: + try: + client = self.blob_service_client.get_blob_client(container=self.container, blob=self.blob) + with open(self.dest, "wb") as blob_stream: + blob_data = client.download_blob() + blob_data.readinto(blob_stream) + except Exception as exc: + self.fail("Failed to download blob {0}:{1} to {2} - {3}".format(self.container, + self.blob, + self.dest, + exc)) + self.results['changed'] = True + self.results['actions'].append('downloaded blob {0}:{1} to {2}'.format(self.container, + self.blob, + self.dest)) + + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + def src_is_valid(self): + if not os.path.isfile(self.src): + self.fail("The source path must be a file.") + if os.access(self.src, os.R_OK): + return True + self.fail("Failed to access {0}. Make sure the file exists and that you have " + "read access.".format(self.src)) + + def dest_is_valid(self): + if not self.check_mode: + if not os.path.basename(self.dest): + # dest is a directory + if os.path.isdir(self.dest): + self.log("Path is dir. 
Appending blob name.") + self.dest += self.blob + else: + try: + self.log('Attempting to makedirs {0}'.format(self.dest)) + os.makedirs(self.dest) + except IOError as exc: + self.fail("Failed to create directory {0} - {1}".format(self.dest, str(exc))) + self.dest += self.blob + else: + # does path exist without basename + file_name = os.path.basename(self.dest) + path = self.dest.replace(file_name, '') + self.log('Checking path {0}'.format(path)) + if not os.path.isdir(path): + try: + self.log('Attempting to makedirs {0}'.format(path)) + os.makedirs(path) + except IOError as exc: + self.fail("Failed to create directory {0} - {1}".format(path, str(exc))) + self.log('Checking final path {0}'.format(self.dest)) + if os.path.isfile(self.dest) and not self.force: + # dest already exists and we're not forcing + self.log("Dest {0} already exists. Cannot download. Use the force option.".format(self.dest)) + return False + return True + + def delete_container(self): + if not self.check_mode: + try: + self.blob_service_client.get_container_client(container=self.container).delete_container() + except Exception as exc: + self.fail("Error deleting container {0} - {1}".format(self.container, str(exc))) + + self.results['changed'] = True + self.results['actions'].append('deleted container {0}'.format(self.container)) + + def container_has_blobs(self): + try: + blobs = self.blob_service_client.get_container_client(container=self.container).list_blobs() + except Exception as exc: + self.fail("Error list blobs in {0} - {1}".format(self.container, str(exc))) + if len(list(blobs)) > 0: + return True + return False + + def delete_blob(self): + if not self.check_mode: + try: + self.blob_service_client.get_container_client(container=self.container).delete_blob(blob=self.blob) + except Exception as exc: + self.fail("Error deleting blob {0}:{1} - {2}".format(self.container, self.blob, str(exc))) + + self.results['changed'] = True + self.results['actions'].append('deleted blob 
{0}:{1}'.format(self.container, self.blob)) + self.results['container'] = self.container_obj + + def update_container_tags(self, tags): + if not self.check_mode: + try: + self.blob_service_client.get_container_client(container=self.container).set_container_metadata(metadata=tags) + except Exception as exc: + self.fail("Error updating container tags {0} - {1}".format(self.container, str(exc))) + self.container_obj = self.get_container() + self.results['changed'] = True + self.results['actions'].append("updated container {0} tags.".format(self.container)) + self.results['container'] = self.container_obj + + def update_blob_tags(self, tags): + if not self.check_mode: + try: + self.blob_service_client.get_blob_client(container=self.container, blob=self.blob).set_blob_metadata(metadata=tags) + except Exception as exc: + self.fail("Update blob tags {0}:{1} - {2}".format(self.container, self.blob, str(exc))) + self.blob_obj = self.get_blob() + self.results['changed'] = True + self.results['actions'].append("updated blob {0}:{1} tags.".format(self.container, self.blob)) + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + def blob_content_settings_differ(self): + if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \ + self.cache_control or self.content_md5: + settings = dict( + content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + cache_control=self.cache_control, + content_md5=self.content_md5 + ) + if self.blob_obj['content_settings'] != settings: + return True + + return False + + def update_blob_content_settings(self): + content_settings = ContentSettings( + content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + cache_control=self.cache_control, + 
            content_md5=self.content_md5
        )
        if not self.check_mode:
            try:
                # set_http_headers replaces ALL content settings on the blob,
                # which is why the full ContentSettings object is built above.
                self.blob_service_client.get_blob_client(container=self.container, blob=self.blob).set_http_headers(content_settings=content_settings)
            except Exception as exc:
                self.fail("Update blob content settings {0}:{1} - {2}".format(self.container, self.blob, str(exc)))

        self.blob_obj = self.get_blob()
        self.results['changed'] = True
        self.results['actions'].append("updated blob {0}:{1} content settings.".format(self.container, self.blob))
        self.results['container'] = self.container_obj
        self.results['blob'] = self.blob_obj


def main():
    AzureRMStorageBlob()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare.py
new file mode 100644
index 000000000..e4749b87a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare.py
@@ -0,0 +1,356 @@
#!/usr/bin/python
#
# Copyright (c) 2021 Andrii Bilorus <andrii.bilorus@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: azure_rm_storageshare
version_added: "1.8.0"
short_description: Manage Azure storage file share
description:
    - Create, update or delete a storage file share in existing storage account.
options:
    resource_group:
        description:
            - Name of the resource group to use.
        required: true
        type: str
    name:
        description:
            - Name of the storage file share to delete or create.
        type: str
        required: true
    account_name:
        description:
            - Name of the parent storage account for the storage file share.
        required: true
        type: str
    access_tier:
        description:
            - The access tier determines the price and in some cases also the performance of a file share.
TransactionOptimized if not set. + type: str + choices: + - TransactionOptimized + - Hot + - Cool + - Premium + metadata: + description: + - A name-value pair to associate with the container as metadata. + type: dict + state: + description: + - State of the storage file share. Use 'present' to create or update a storage file share and use 'absent' to delete a file share. + default: present + type: str + choices: + - absent + - present + quota: + description: + - The maximum size of the file share, in gigabytes. Must be greater than 0, and less than or equal to 5TB (5120). + For large file shares, the maximum size is 102400. By default 102400 + type: int + + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Andrii Bilorus (@ewscat) +''' + +EXAMPLES = ''' +--- +- name: Create storage share + azure_rm_storageshare: + name: testShare + resource_group: myResourceGroup + account_name: testStorageAccount + state: present + access_tier: Cool + quota: 2048 + metadata: + key1: value1 + key2: value2 + +- name: Delete storage share + azure_rm_storageshare: + name: testShare + resource_group: myResourceGroup + account_name: testStorageAccount + state: absent +''' + +RETURN = ''' +state: + description: + - Facts about the current state of the storage file file share. 
+ returned: always + type: complex + contains: + id: + description: + - Resource ID of the storage file share + sample: "/subscriptions/9e700857-1631-4d8a-aed5-908520ede375/resourceGroups/myResourceGroup/providers/Microsoft.Storage/ + storageAccounts/mystorageaccount/fileServices/default/shares/myshare" + returned: always + type: str + name: + description: + - Name of the file share + sample: myshare + returned: always + type: str + type: + description: + - The type of the resource + sample: "Microsoft.Storage/storageAccounts/fileServices/shares" + returned: always + type: str + etag: + description: + - Resource Etag + sample: "0x8D75E4BA3E275F1" + returned: always + type: str + last_modified_time: + description: + - Returns the date and time the file share was last modified + sample: "2021-08-23T08:17:35+00:00" + returned: always + type: str + metadata: + description: + - A name-value pair to associate with the file share as metadata + sample: '{"key1": "value1"}' + returned: always + type: dict + share_quota: + description: + - The maximum size of the file share, in gigabytes + sample: 102400 + returned: always + type: int + access_tier: + description: + - Access tier for specific file share + sample: 'TransactionOptimized' + returned: always + type: str + access_tier_change_time: + description: + - Indicates the last modification time for file share access tier + sample: "2021-08-23T08:17:35+00:00" + returned: always + type: str +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class Actions: + ''' + Action list that can be executed with storage file share + ''' + NoAction, Create, Update, Delete = range(4) + + +class AzureRMStorageShare(AzureRMModuleBase): + ''' + Configuration class for an Azure RM Storage file share resource + ''' + + def __init__(self): + + 
self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + account_name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + access_tier=dict(type='str', default=None, + choices=['TransactionOptimized', 'Hot', 'Cool', 'Premium']), + quota=dict(type='int', default=None), + metadata=dict(type='dict', default=None), + ) + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.account_name = None + self.state = None + self.quota = None + self.metadata = None + + self.to_do = Actions.NoAction + + super(AzureRMStorageShare, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + ''' + Main module execution method + ''' + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + self.log('Fetching storage file share {0}'.format(self.name)) + response = None + old_response = self.get_share() + + if old_response is None: + if self.state == "present": + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + self.to_do = Actions.Update + + if self.to_do == Actions.Create: + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_storage_share() + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.delete_storage_share() + elif self.to_do == Actions.Update: + if self.update_needed(old_response): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.update_storage_share(old_response) + else: + self.results['changed'] = False + response = old_response + + if response is not None: + self.results['state'] = response + else: + self.results['state'] = dict() + + return self.results + + def 
update_needed(self, old_response): + ''' + Define if storage file share update needed. + :param old_response: dict with properties of the storage file share + :return: True if update needed, else False + ''' + return ((self.access_tier is not None) and (self.access_tier != old_response.get('access_tier')) or + (self.quota is not None) and (self.quota != old_response.get('share_quota')) or + (self.metadata is not None) and (self.metadata != old_response.get('metadata'))) + + def get_share(self): + ''' + Get the properties of the specified Azure Storage file share. + :return: dict with properties of the storage file share + ''' + found = False + try: + storage_share = self.storage_client.file_shares.get(resource_group_name=self.resource_group, + account_name=self.account_name, + share_name=self.name) + found = True + self.log("Response : {0}".format(storage_share)) + except ResourceNotFoundError as e: + self.log("Did not find the storage file share with name {0} : {1}".format(self.name, str(e))) + return self.storage_share_to_dict(storage_share) if found else None + + def storage_share_to_dict(self, storage_share): + ''' + Transform Azure RM Storage share object to dictionary + :param storage_share: contains information about storage file share + :type storage_share: FileShare + :return: dict generated from storage_share + ''' + return dict( + id=storage_share.id, + name=storage_share.name, + type=storage_share.type, + etag=storage_share.etag.replace('"', ''), + last_modified_time=storage_share.last_modified_time, + metadata=storage_share.metadata, + share_quota=storage_share.share_quota, + access_tier=storage_share.access_tier, + access_tier_change_time=storage_share.access_tier_change_time, + ) + + def create_storage_share(self): + ''' + Method calling the Azure SDK to create storage file share. 
+ :return: dict with description of the new storage file share + ''' + self.log("Creating fileshare {0}".format(self.name)) + try: + self.storage_client.file_shares.create(resource_group_name=self.resource_group, + account_name=self.account_name, + share_name=self.name, + file_share=dict(access_tier=self.access_tier, + share_quota=self.quota, + metadata=self.metadata)) + except Exception as e: + self.fail("Error creating file share {0} : {1}".format(self.name, str(e))) + return self.get_share() + + def update_storage_share(self, old_responce): + ''' + Method calling the Azure SDK to update storage file share. + :param old_response: dict with properties of the storage file share + :return: dict with description of the new storage file share + ''' + self.log("Creating file share {0}".format(self.name)) + file_share_details = dict( + access_tier=self.access_tier if self.access_tier else old_responce.get('access_tier'), + share_quota=self.quota if self.quota else old_responce.get('share_quota'), + metadata=self.metadata if self.metadata else old_responce.get('metadata') + ) + try: + self.storage_client.file_shares.update(resource_group_name=self.resource_group, + account_name=self.account_name, + share_name=self.name, + file_share=file_share_details) + except Exception as e: + self.fail("Error updating file share {0} : {1}".format(self.name, str(e))) + return self.get_share() + + def delete_storage_share(self): + ''' + Method calling the Azure SDK to delete storage share. 
+ :return: object resulting from the original request + ''' + try: + self.storage_client.file_shares.delete(resource_group_name=self.resource_group, + account_name=self.account_name, + share_name=self.name) + except Exception as e: + self.fail("Error deleting file share {0} : {1}".format(self.name, str(e))) + return self.get_share() + + +def main(): + AzureRMStorageShare() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare_info.py new file mode 100644 index 000000000..cfad66aee --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageshare_info.py @@ -0,0 +1,281 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Andrii Bilorus +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_storageshare_info +version_added: "1.8.0" +short_description: Get Azure storage file share info +description: + - Get facts for storage file share. +options: + resource_group: + description: + - Name of the resource group to use. + required: true + type: str + name: + description: + - Name of the storage file share. + type: str + required: false + account_name: + description: + - Name of the parent storage account for the storage file share. 
+ required: true + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Andrii Bilorus (@ewscat) +''' + +EXAMPLES = ''' +--- +- name: Get storage share details + azure_rm_storageshare_info: + name: testShare + resource_group: myResourceGroup + account_name: testStorageAccount + + +- name: Get all storage file shares in storage account + azure_rm_storageshare: + resource_group: myResourceGroup + account_name: testStorageAccount +''' + +RETURN = ''' +state: + description: + - Facts about the current state of the storage file share + returned: always + type: complex + contains: + id: + description: + - Resource ID of the storage file share + sample: "/subscriptions/9e700857-1631-4d8a-aed5-908520ede375/resourceGroups/myResourceGroup/providers/Microsoft.Storage/ + storageAccounts/mystorageaccount/fileServices/default/shares/myshare" + returned: always + type: str + name: + description: + - Name of the file share + sample: myshare + returned: always + type: str + type: + description: + - The type of the resource + sample: "Microsoft.Storage/storageAccounts/fileServices/shares" + returned: always + type: str + etag: + description: + - Resource Etag + sample: "0x8D75E4BA3E275F1" + returned: always + type: str + last_modified_time: + description: + - Returns the date and time the file share was last modified + sample: "2021-08-23T08:17:35+00:00" + returned: always + type: str + metadata: + description: + - A name-value pair to associate with the file share as metadata + sample: '{"key1": "value1"}' + returned: always + type: dict + share_quota: + description: + - The maximum size of the file share, in gigabytes + sample: 102400 + returned: always + type: int + access_tier: + description: + - Access tier for specific file share + sample: 'TransactionOptimized' + returned: always + type: str + access_tier_change_time: + description: + - Indicates the last modification time for file share access tier + sample: "2021-08-23T08:17:35+00:00" + 
returned: always + type: str + enabled_protocols: + description: + - The authentication protocol that is used for the file share. + sample: 'SMB' + returned: always + type: str + root_squash: + description: + - The property is for NFS share only. The default is NoRootSquash. + sample: 'NoRootSquash' + returned: always + type: str + version: + description: + - The version of the file share + returned: always + type: str + deleted: + description: + - Indicates whether the share was deleted + returned: always + type: str + deleted_time: + description: + - The deleted time if the share was deleted + returned: always + type: str + remaining_retention_days: + description: + - Remaining retention days for share that + returned: always + type: str + access_tier_status: + description: + - Indicates if there is a pending transition for access tier + returned: always + type: str + share_usage_bytes: + description: + - The approximate size of the data stored on the share. Note that this value may not include + all recently created or recently resized files. 
+ returned: always + type: int +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMStorageShareInfo(AzureRMModuleBase): + ''' + Info class for an Azure RM Storage share resource + ''' + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str'), + account_name=dict(type='str', required=True), + ) + self.results = dict( + changed=False, + storageshares=list() + ) + + self.resource_group = None + self.name = None + self.account_name = None + + super(AzureRMStorageShareInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + ''' + Main module execution method + ''' + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name: + self.results['storageshares'] = self.get_share() + else: + self.results['storageshares'] = self.list_all() + + return self.results + + def get_share(self): + ''' + Get the properties of the specified Azure Storage file share. 
+ :return: dict with properties of the storage file share + ''' + storage_share = None + try: + storage_share = self.storage_client.file_shares.get(resource_group_name=self.resource_group, + account_name=self.account_name, + share_name=self.name) + self.log("Response : {0}".format(storage_share)) + except ResourceNotFoundError as e: + self.log("Did not find the storage share with name {0} : {1}".format(self.name, str(e))) + return self.storage_share_to_dict(storage_share) + + def storage_share_to_dict(self, storage_share): + ''' + Transform Azure RM Storage share object to dictionary + :param storage_share: contains information about storage file share + :type storage_share: FileShare + :return: dict generated from storage_share + ''' + return dict( + id=storage_share.id, + name=storage_share.name, + type=storage_share.type, + etag=storage_share.etag.replace('"', ''), + last_modified_time=storage_share.last_modified_time, + metadata=storage_share.metadata, + share_quota=storage_share.share_quota, + access_tier=storage_share.access_tier, + access_tier_change_time=storage_share.access_tier_change_time, + enabled_protocols=storage_share.enabled_protocols, + root_squash=storage_share.root_squash, + version=storage_share.version, + deleted=storage_share.deleted, + deleted_time=storage_share.deleted_time, + remaining_retention_days=storage_share.remaining_retention_days, + access_tier_status=storage_share.access_tier_status, + share_usage_bytes=storage_share.share_usage_bytes + ) if storage_share else None + + def list_all(self): + ''' + Method calling the Azure SDK to create storage file share. + :return: dict with description of the new storage file share + ''' + ''' + Get the properties of the specified Azure Storage file share. 
+ :return: dict with properties of the storage file share + ''' + all_items = None + try: + storage_shares = self.storage_client.file_shares.list(resource_group_name=self.resource_group, + account_name=self.account_name) + self.log("Response : {0}".format(storage_shares)) + all_items = [self.storage_share_to_dict(share) for share in storage_shares] + except Exception as e: + self.log("Did not find the storage file share : {0}".format(str(e))) + return all_items + + +def main(): + AzureRMStorageShareInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet.py new file mode 100644 index 000000000..063ba1504 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet.py @@ -0,0 +1,693 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_subnet +version_added: "0.1.0" +short_description: Manage Azure subnets +description: + - Create, update or delete a subnet within a given virtual network. + - Allows setting and updating the address prefix CIDR, which must be valid within the context of the virtual network. + - Use the M(azure.azcollection.azure_rm_networkinterface) module to associate interfaces with the subnet and assign specific IP addresses. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the subnet. + required: true + address_prefix_cidr: + description: + - CIDR defining the IPv4 address space of the subnet. Must be valid within the context of the virtual network. 
+ aliases: + - address_prefix + address_prefixes_cidr: + description: + - CIDR defining the IPv4 and IPv6 address space of the subnet. Must be valid within the context of the virtual network. + - If set I(address_prefix), It will not set. + aliases: + - address_prefixes + type: list + version_added: "1.0.0" + security_group: + description: + - Existing security group with which to associate the subnet. + - It can be the security group name which is in the same resource group. + - Can be the resource ID of the security group. + - Can be a dict containing the I(name) and I(resource_group) of the security group. + aliases: + - security_group_name + state: + description: + - Assert the state of the subnet. Use C(present) to create or update a subnet and use C(absent) to delete a subnet. + default: present + choices: + - absent + - present + virtual_network_name: + description: + - Name of an existing virtual network with which the subnet is or will be associated. + required: true + aliases: + - virtual_network + route_table: + description: + - The reference of the RouteTable resource. + - Can be the name or resource ID of the route table. + - Can be a dict containing the I(name) and I(resource_group) of the route table. + - Without this configuration, the associated route table will be dissociate. If there is no associated route table, it has no impact. + service_endpoints: + description: + - An array of service endpoints. + type: list + suboptions: + service: + description: + - The type of the endpoint service. + required: True + locations: + description: + - A list of locations. + type: list + private_endpoint_network_policies: + description: + - C(Enabled) or C(Disabled) apply network policies on private endpoints in the subnet. + type: str + default: Enabled + choices: + - Enabled + - Disabled + private_link_service_network_policies: + description: + - C(Enabled) or C(Disabled) apply network policies on private link service in the subnet. 
+ type: str + default: Enabled + choices: + - Enabled + - Disabled + delegations: + description: + - An array of delegations. + type: list + suboptions: + name: + description: + - The name of delegation. + required: True + serviceName: + description: + - The type of the endpoint service. + required: True + choices: + - Microsoft.Web/serverFarms + - Microsoft.ContainerInstance/containerGroups + - Microsoft.Netapp/volumes + - Microsoft.HardwareSecurityModules/dedicatedHSMs + - Microsoft.ServiceFabricMesh/networks + - Microsoft.Logic/integrationServiceEnvironments + - Microsoft.Batch/batchAccounts + - Microsoft.Sql/managedInstances + - Microsoft.Web/hostingEnvironments + - Microsoft.BareMetal/CrayServers + - Microsoft.BareMetal/MonitoringServers + - Microsoft.Databricks/workspaces + - Microsoft.BareMetal/AzureHostedService + - Microsoft.BareMetal/AzureVMware + - Microsoft.BareMetal/AzureHPC + - Microsoft.BareMetal/AzurePaymentHSM + - Microsoft.StreamAnalytics/streamingJobs + - Microsoft.DBforPostgreSQL/serversv2 + - Microsoft.AzureCosmosDB/clusters + - Microsoft.MachineLearningServices/workspaces + - Microsoft.DBforPostgreSQL/singleServers + - Microsoft.DBforPostgreSQL/flexibleServers + - Microsoft.DBforMySQL/serversv2 + - Microsoft.DBforMySQL/flexibleServers + - Microsoft.ApiManagement/service + - Microsoft.Synapse/workspaces + - Microsoft.PowerPlatform/vnetaccesslinks + - Microsoft.Network/managedResolvers + - Microsoft.Kusto/clusters + actions: + description: + - A list of actions. + type: list + nat_gateway: + description: + - Existing NAT Gateway with which to associate the subnet. + - It can be the NAT Gateway name which is in the same resource group. + - Can be the resource ID of the NAT Gateway. + - Can be a dict containing the I(name) and I(resource_group) of the NAT Gateway. 
+ type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Create a subnet + azure_rm_subnet: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + address_prefix_cidr: "10.1.0.0/24" + + - name: Create a subnet refer nsg from other resource group + azure_rm_subnet: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + address_prefix_cidr: "10.1.0.0/16" + security_group: + name: secgroupfoo + resource_group: mySecondResourceGroup + route_table: route + + - name: Create a subnet with service endpoint + azure_rm_subnet: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + address_prefix_cidr: "10.1.0.0/16" + service_endpoints: + - service: "Microsoft.Sql" + locations: + - "eastus" + + - name: Create a subnet with delegations + azure_rm_subnet: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + address_prefix_cidr: "10.1.0.0/16" + delegations: + - name: 'mydeleg' + serviceName: 'Microsoft.ContainerInstance/containerGroups' + + - name: Create a subnet with an associated NAT Gateway + azure_rm_subnet: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + address_prefix_cidr: "10.1.0.0/16" + nat_gateway: myNatGateway + + - name: Delete a subnet + azure_rm_subnet: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the subnet. + returned: success + type: complex + contains: + address_prefix: + description: + - IP address CIDR. + returned: always + type: str + sample: "10.1.0.0/16" + address_prefixes: + description: + - IP address for IPv4 and IPv6 CIDR. 
+ returned: always + type: list + sample: ["10.2.0.0/24", "fdda:e69b:1587:495e::/64"] + id: + description: + - Subnet resource path. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualNetwork/subnets/mySubnet" + name: + description: + - Subnet name. + returned: always + type: str + sample: "foobar" + network_security_group: + description: + - Associated network security group of subnets. + returned: always + type: complex + contains: + id: + description: + - Security group resource identifier. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroupfoo" + name: + description: + - Name of the security group. + returned: always + type: str + sample: "secgroupfoo" + provisioning_state: + description: + - Success or failure of the provisioning event. + returned: always + type: str + sample: "Succeeded" + private_endpoint_network_policies: + description: + - C(Enabled) or C(Disabled) apply network policies on private endpoints in the subnet. + returned: always + type: str + sample: "Enabled" + private_link_service_network_policies: + description: + - C(Enabled) or C(Disabled) apply network policies on private link service in the subnet. 
+ returned: always + type: str + sample: "Disabled" + delegations: + description: + - Associated delegation of subnets + returned: always + type: list + contains: + name: + description: + - name of delegation + returned: when delegation is present + type: str + sample: "delegationname" + serviceName: + description: + - service associated to delegation + returned: when delegation is present + type: str + sample: "Microsoft.ContainerInstance/containerGroups" + actions: + description: + - list of actions associated with service of delegation + returned : when delegation is present + type: list + sample: ["Microsoft.Network/virtualNetworks/subnets/action"] +''' # NOQA + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN, azure_id_to_dict, format_resource_id + +try: + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.tools import is_valid_resource_id +except ImportError: + # This is handled in azure_rm_common + pass + + +delegations_spec = dict( + name=dict( + type='str', + required=True + ), + serviceName=dict( + type='str', + required=True, + choices=['Microsoft.Web/serverFarms', 'Microsoft.ContainerInstance/containerGroups', 'Microsoft.Netapp/volumes', + 'Microsoft.HardwareSecurityModules/dedicatedHSMs', 'Microsoft.ServiceFabricMesh/networks', + 'Microsoft.Logic/integrationServiceEnvironments', 'Microsoft.Batch/batchAccounts', 'Microsoft.Sql/managedInstances', + 'Microsoft.Web/hostingEnvironments', 'Microsoft.BareMetal/CrayServers', 'Microsoft.BareMetal/MonitoringServers', + 'Microsoft.Databricks/workspaces', 'Microsoft.BareMetal/AzureHostedService', 'Microsoft.BareMetal/AzureVMware', + 'Microsoft.BareMetal/AzureHPC', 'Microsoft.BareMetal/AzurePaymentHSM', 'Microsoft.StreamAnalytics/streamingJobs', + 'Microsoft.DBforPostgreSQL/serversv2', 'Microsoft.AzureCosmosDB/clusters', 'Microsoft.MachineLearningServices/workspaces', + 'Microsoft.DBforPostgreSQL/singleServers', 
'Microsoft.DBforPostgreSQL/flexibleServers', 'Microsoft.DBforMySQL/serversv2', + 'Microsoft.DBforMySQL/flexibleServers', 'Microsoft.ApiManagement/service', 'Microsoft.Synapse/workspaces', + 'Microsoft.PowerPlatform/vnetaccesslinks', 'Microsoft.Network/managedResolvers', 'Microsoft.Kusto/clusters'] + ), + actions=dict( + type='list', + default=[] + ) +) + + +def subnet_to_dict(subnet): + result = dict( + id=subnet.id, + name=subnet.name, + provisioning_state=subnet.provisioning_state, + address_prefix=subnet.address_prefix, + address_prefixes=subnet.address_prefixes, + network_security_group=dict(), + route_table=dict(), + private_endpoint_network_policies=subnet.private_endpoint_network_policies, + private_link_service_network_policies=subnet.private_link_service_network_policies, + nat_gateway=None + ) + if subnet.network_security_group: + id_keys = azure_id_to_dict(subnet.network_security_group.id) + result['network_security_group']['id'] = subnet.network_security_group.id + result['network_security_group']['name'] = id_keys['networkSecurityGroups'] + result['network_security_group']['resource_group'] = id_keys['resourceGroups'] + if subnet.route_table: + id_keys = azure_id_to_dict(subnet.route_table.id) + result['route_table']['id'] = subnet.route_table.id + result['route_table']['name'] = id_keys['routeTables'] + result['route_table']['resource_group'] = id_keys['resourceGroups'] + if subnet.service_endpoints: + result['service_endpoints'] = [{'service': item.service, 'locations': item.locations or []} for item in subnet.service_endpoints] + if subnet.delegations: + result['delegations'] = [{'name': item.name, 'serviceName': item.service_name, 'actions': item.actions or []} for item in subnet.delegations] + if subnet.nat_gateway: + result['nat_gateway'] = subnet.nat_gateway.id + return result + + +class AzureRMSubnet(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + 
name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + virtual_network_name=dict(type='str', required=True, aliases=['virtual_network']), + address_prefix_cidr=dict(type='str', aliases=['address_prefix']), + address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']), + security_group=dict(type='raw', aliases=['security_group_name']), + route_table=dict(type='raw'), + service_endpoints=dict( + type='list' + ), + private_endpoint_network_policies=dict( + type='str', + default='Enabled', + choices=['Enabled', 'Disabled'] + ), + private_link_service_network_policies=dict( + type='str', + default='Enabled', + choices=['Enabled', 'Disabled'] + ), + delegations=dict( + type='list', + elements='dict', + options=delegations_spec + ), + nat_gateway=dict(type='str') + ) + + mutually_exclusive = [['address_prefix_cidr', 'address_prefixes_cidr']] + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.virtual_network_name = None + self.address_prefix_cidr = None + self.address_prefixes_cidr = None + self.security_group = None + self.route_table = None + self.service_endpoints = None + self.private_link_service_network_policies = None + self.private_endpoint_network_policies = None + self.delegations = None + self.nat_gateway = None + + super(AzureRMSubnet, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + + nsg = None + subnet = None + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.delegations and len(self.delegations) > 1: + self.fail("Only one delegation is supported for a subnet") + + if self.address_prefix_cidr and not CIDR_PATTERN.match(self.address_prefix_cidr): + self.fail("Invalid address_prefix_cidr value {0}".format(self.address_prefix_cidr)) + + nsg = dict() + if self.security_group: + nsg = 
self.parse_nsg() + + nat_gateway = self.build_nat_gateway_id(self.nat_gateway) + + route_table = dict() + if self.route_table: + route_table = self.parse_resource_to_dict(self.route_table) + self.route_table = format_resource_id(val=route_table['name'], + subscription_id=route_table['subscription_id'], + namespace='Microsoft.Network', + types='routeTables', + resource_group=route_table['resource_group']) + + results = dict() + changed = False + + try: + self.log('Fetching subnet {0}'.format(self.name)) + subnet = self.network_client.subnets.get(self.resource_group, + self.virtual_network_name, + self.name) + self.check_provisioning_state(subnet, self.state) + results = subnet_to_dict(subnet) + + if self.state == 'present': + if self.private_endpoint_network_policies is not None: + if results['private_endpoint_network_policies'] != self.private_endpoint_network_policies: + self.log("CHANGED: subnet {0} private_endpoint_network_policies".format(self.private_endpoint_network_policies)) + changed = True + results['private_endpoint_network_policies'] = self.private_endpoint_network_policies + else: + subnet['private_endpoint_network_policies'] = results['private_endpoint_network_policies'] + if self.private_link_service_network_policies is not None: + if results['private_link_service_network_policies'] != self.private_link_service_network_policies is not None: + self.log("CHANGED: subnet {0} private_link_service_network_policies".format(self.private_link_service_network_policies)) + changed = True + results['private_link_service_network_policies'] = self.private_link_service_network_policies + else: + subnet['private_link_service_network_policies'] = results['private_link_service_network_policies'] + + if self.address_prefix_cidr and results['address_prefix'] != self.address_prefix_cidr: + self.log("CHANGED: subnet {0} address_prefix_cidr".format(self.name)) + changed = True + results['address_prefix'] = self.address_prefix_cidr + if self.address_prefixes_cidr and 
results['address_prefixes'] != self.address_prefixes_cidr: + self.log("CHANGED: subnet {0} address_prefixes_cidr".format(self.name)) + changed = True + results['address_prefixes'] = self.address_prefixes_cidr + + if self.security_group is not None and results['network_security_group'].get('id') != nsg.get('id'): + self.log("CHANGED: subnet {0} network security group".format(self.name)) + changed = True + results['network_security_group']['id'] = nsg.get('id') + results['network_security_group']['name'] = nsg.get('name') + if self.route_table is not None: + if self.route_table != results['route_table'].get('id'): + changed = True + results['route_table']['id'] = self.route_table + self.log("CHANGED: subnet {0} route_table to {1}".format(self.name, route_table.get('name'))) + else: + if results['route_table'].get('id') is not None: + changed = True + results['route_table']['id'] = None + self.log("CHANGED: subnet {0} will dissociate to route_table {1}".format(self.name, route_table.get('name'))) + + if self.service_endpoints or self.service_endpoints == []: + oldd = {} + for item in self.service_endpoints: + name = item['service'] + locations = item.get('locations') or [] + oldd[name] = {'service': name, 'locations': locations.sort()} + newd = {} + if 'service_endpoints' in results: + for item in results['service_endpoints']: + name = item['service'] + locations = item.get('locations') or [] + newd[name] = {'service': name, 'locations': locations.sort()} + if newd != oldd: + changed = True + results['service_endpoints'] = self.service_endpoints + + if self.delegations: + oldde = {} + for item in self.delegations: + name = item['name'] + serviceName = item['serviceName'] + actions = item.get('actions') or [] + oldde[name] = {'name': name, 'serviceName': serviceName, 'actions': actions.sort()} + newde = {} + if 'delegations' in results: + for item in results['delegations']: + name = item['name'] + serviceName = item['serviceName'] + actions = item.get('actions') or [] 
+ newde[name] = {'name': name, 'serviceName': serviceName, 'actions': actions.sort()} + if newde != oldde: + changed = True + results['delegations'] = self.delegations + + if nat_gateway is not None: + if nat_gateway != results['nat_gateway']: + changed = True + # Update associated NAT Gateway + results['nat_gateway'] = nat_gateway + else: + if results['nat_gateway'] is not None: + changed = True + # Disassociate NAT Gateway + results['nat_gateway'] = None + + elif self.state == 'absent': + changed = True + except ResourceNotFoundError: + # the subnet does not exist + if self.state == 'present': + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + if not self.check_mode: + + if self.state == 'present' and changed: + if not subnet: + # create new subnet + if not self.address_prefix_cidr and not self.address_prefixes_cidr: + self.fail('address_prefix_cidr or address_prefixes_cidr is not set') + self.log('Creating subnet {0}'.format(self.name)) + subnet = self.network_models.Subnet( + address_prefix=self.address_prefix_cidr, + address_prefixes=self.address_prefixes_cidr + ) + if nsg: + subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id')) + if self.route_table: + subnet.route_table = self.network_models.RouteTable(id=self.route_table) + if self.service_endpoints: + subnet.service_endpoints = self.service_endpoints + if self.private_endpoint_network_policies: + subnet.private_endpoint_network_policies = self.private_endpoint_network_policies + if self.private_link_service_network_policies: + subnet.private_link_service_network_policies = self.private_link_service_network_policies + if self.delegations: + subnet.delegations = self.delegations + if nat_gateway: + subnet.nat_gateway = self.network_models.SubResource(id=nat_gateway) + else: + # update subnet + self.log('Updating subnet {0}'.format(self.name)) + subnet = self.network_models.Subnet( + address_prefix=results['address_prefix'], + 
address_prefixes=results['address_prefixes'] + ) + if results['network_security_group'].get('id') is not None: + subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=results['network_security_group'].get('id')) + if results['route_table'].get('id') is not None: + subnet.route_table = self.network_models.RouteTable(id=results['route_table'].get('id')) + + if results.get('service_endpoints') is not None: + subnet.service_endpoints = results['service_endpoints'] + if results.get('private_link_service_network_policies') is not None: + subnet.private_link_service_network_policies = results['private_link_service_network_policies'] + if results.get('private_endpoint_network_policies') is not None: + subnet.private_endpoint_network_policies = results['private_endpoint_network_policies'] + if results.get('delegations') is not None: + subnet.delegations = results['delegations'] + if results.get('nat_gateway') is not None: + subnet.nat_gateway = self.network_models.SubResource(id=results['nat_gateway']) + + self.results['state'] = self.create_or_update_subnet(subnet) + elif self.state == 'absent' and changed: + # delete subnet + self.delete_subnet() + # the delete does not actually return anything. if no exception, then we'll assume + # it worked. 
            self.results['state']['status'] = 'Deleted'

        return self.results

    def create_or_update_subnet(self, subnet):
        """Create or update the subnet and return the resulting facts dict.

        Blocks on the long-running operation via get_poller_result() and
        fails the module on any service error.
        """
        try:
            poller = self.network_client.subnets.begin_create_or_update(self.resource_group,
                                                                        self.virtual_network_name,
                                                                        self.name,
                                                                        subnet)
            new_subnet = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating subnet {0} - {1}".format(self.name, str(exc)))
        self.check_provisioning_state(new_subnet)
        return subnet_to_dict(new_subnet)

    def delete_subnet(self):
        """Delete the subnet, blocking until the long-running operation completes."""
        self.log('Deleting subnet {0}'.format(self.name))
        try:
            poller = self.network_client.subnets.begin_delete(self.resource_group,
                                                              self.virtual_network_name,
                                                              self.name)
            result = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting subnet {0} - {1}".format(self.name, str(exc)))

        return result

    def parse_nsg(self):
        """Normalize self.security_group (plain name or dict) into dict(id=..., name=...)."""
        nsg = self.security_group
        resource_group = self.resource_group
        if isinstance(self.security_group, dict):
            # A dict form may carry its own resource_group; fall back to the module's.
            nsg = self.security_group.get('name')
            resource_group = self.security_group.get('resource_group', self.resource_group)
        id = format_resource_id(val=nsg,
                                subscription_id=self.subscription_id,
                                namespace='Microsoft.Network',
                                types='networkSecurityGroups',
                                resource_group=resource_group)
        name = azure_id_to_dict(id).get('name')
        return dict(id=id, name=name)

    def build_nat_gateway_id(self, resource):
        """
        Common method to build a resource id from different inputs.

        Returns None when no NAT gateway was requested, the input unchanged
        when it is already a valid resource id, and otherwise a constructed
        Microsoft.Network/natGateways resource id.
        """
        if resource is None:
            return None
        if is_valid_resource_id(resource):
            return resource
        resource_dict = self.parse_resource_to_dict(resource)
        return format_resource_id(val=resource_dict['name'],
                                  subscription_id=resource_dict.get('subscription_id'),
                                  namespace='Microsoft.Network',
                                  types='natGateways',
                                  resource_group=resource_dict.get('resource_group'))


def main():
    """Module entry point."""
    AzureRMSubnet()


if __name__ == '__main__':
    main()
diff --git
a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet_info.py new file mode 100644 index 000000000..ec75ff6b9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subnet_info.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_subnet_info +version_added: "0.1.2" +short_description: Get Azure Subnet facts +description: + - Get facts of Azure Subnet. + +options: + resource_group: + description: + - The name of the resource group. + required: True + virtual_network_name: + description: + - The name of the virtual network. + required: True + name: + description: + - The name of the subnet. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get facts of specific subnet + azure_rm_subnet_info: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet + + - name: List facts for all subnets in virtual network + azure_rm_subnet_info: + resource_group: myResourceGroup + virtual_network_name: myVirtualNetwork + name: mySubnet +''' + +RETURN = ''' +subnets: + description: + - A list of dictionaries containing facts for subnet. + returned: always + type: complex + contains: + id: + description: + - Subnet resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/my + VirtualNetwork/subnets/mySubnet" + resource_group: + description: + - Name of resource group. 
+ returned: always + type: str + sample: myResourceGroup + virtual_network_name: + description: + - Name of the containing virtual network. + returned: always + type: str + sample: myVirtualNetwork + name: + description: + - Name of the subnet. + returned: always + type: str + sample: mySubnet + address_prefix_cidr: + description: + - CIDR defining the IPv4 address space of the subnet. + returned: always + type: str + sample: "10.1.0.0/16" + address_prefixes_cidr: + description: + - CIDR defining the IPv4 and IPv6 address space of the subnet. + returned: always + type: list + sample: ["10.2.0.0/24", "fdda:e69b:1587:495e::/64"] + route_table: + description: + - Associated route table ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/routeTables/myRouteTable + private_endpoint_network_policies: + description: + - C(Enabled) or C(Disabled) apply network policies on private endpoints in the subnet. + returned: always + type: str + sample: Enabled + private_link_service_network_policies: + description: + - C(Enabled) or C(Disabled) apply network policies on private link service in the subnet. + returned: always + type: str + sample: Disabled + security_group: + description: + - Associated security group ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkSecurityGr + oups/myNsg" + service_endpoints: + description: + - List of service endpoints. + type: list + returned: when available + contains: + service: + description: + - The type of the endpoint service. + returned: always + type: str + sample: Microsoft.Sql + locations: + description: + - A list of location names. + type: list + returned: always + sample: [ 'eastus', 'westus' ] + provisioning_state: + description: + - Provisioning state. 
+ returned: always + type: str + sample: Succeeded + delegations: + description: + - Associated delegation of subnets + returned: always + type: list + contains: + name: + description: + - name of delegation + returned: when delegation is present + type: str + sample: "delegationname" + serviceName: + description: + - service associated to delegation + returned: when delegation is present + type: str + sample: "Microsoft.ContainerInstance/containerGroups" + actions: + description: + - list of actions associated with service of delegation + returned : when delegation is present + type: list + sample: ["Microsoft.Network/virtualNetworks/subnets/action"] + provisioning_state: + description: + - Provisioning state of delegation. + returned: when delegation is present + type: str + sample: Succeeded + provisioning_state: + description: + - Provisioning state. + returned: always + type: str + sample: Succeeded + nat_gateway: + description: + - ID of the associated NAT Gateway. + returned: when available + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/natGateways/myGw" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMSubnetInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + virtual_network_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.virtual_network_name = None + self.name = None + super(AzureRMSubnetInfo, self).__init__(self.module_arg_spec, 
supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_subnet_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_subnet_facts' module has been renamed to 'azure_rm_subnet_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['subnets'] = self.get() + else: + self.results['subnets'] = self.list() + + return self.results + + def get(self): + response = None + results = [] + try: + response = self.network_client.subnets.get(resource_group_name=self.resource_group, + virtual_network_name=self.virtual_network_name, + subnet_name=self.name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.fail('Could not get facts for Subnet.') + + if response is not None: + results.append(self.format_response(response)) + + return results + + def list(self): + response = None + results = [] + try: + response = self.network_client.subnets.get(resource_group_name=self.resource_group, + virtual_network_name=self.virtual_network_name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.fail('Could not get facts for Subnet.') + + if response is not None: + for item in response: + results.append(self.format_item(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'resource_group': self.resource_group, + 'virtual_network_name': self.parse_resource_to_dict(d.get('id')).get('name'), + 'name': d.get('name'), + 'id': d.get('id'), + 'address_prefix_cidr': d.get('address_prefix'), + 'address_prefixes_cidr': d.get('address_prefixes'), + 'route_table': d.get('route_table', {}).get('id'), + 'security_group': d.get('network_security_group', {}).get('id'), + 'provisioning_state': d.get('provisioning_state'), + 'service_endpoints': d.get('service_endpoints'), + 'private_endpoint_network_policies': 
d.get('private_endpoint_network_policies'), + 'private_link_service_network_policies': d.get('private_link_service_network_policies'), + 'delegations': d.get('delegations'), + 'nat_gateway': d.get('nat_gateway', {}).get('id') + } + return d + + +def main(): + AzureRMSubnetInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subscription_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subscription_info.py new file mode 100644 index 000000000..9caeb3ef7 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_subscription_info.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Paul Aiton, < @paultaiton > +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_subscription_info + +version_added: "1.2.0" + +short_description: Get Azure Subscription facts + +description: + - Get facts for a specific subscription or all subscriptions. + +options: + id: + description: + - Limit results to a specific subscription by id. + - Mutually exclusive with I(name). + type: str + name: + description: + - Limit results to a specific subscription by name. + - Mutually exclusive with I(id). + aliases: + - subscription_name + type: str + all: + description: + - If true, will return all subscriptions. + - If false will omit disabled subscriptions (default). + - Option has no effect when searching by id or name, and will be silently ignored. + type: bool + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key:value'. + - Option has no effect when searching by id or name, and will be silently ignored. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Paul Aiton (@paultaiton) +''' + +EXAMPLES = ''' +- name: Get facts for one subscription by id + azure_rm_subscription_info: + id: 00000000-0000-0000-0000-000000000000 + +- name: Get facts for one subscription by name + azure_rm_subscription_info: + name: "my-subscription" + +- name: Get facts for all subscriptions, including ones that are disabled. + azure_rm_subscription_info: + all: True + +- name: Get facts for subscriptions containing tags provided. + azure_rm_subscription_info: + tags: + - testing + - foo:bar +''' + +RETURN = ''' +subscriptions: + description: + - List of subscription dicts. + returned: always + type: list + contains: + display_name: + description: Subscription display name. + returned: always + type: str + sample: my-subscription + fqid: + description: Subscription fully qualified id. + returned: always + type: str + sample: "/subscriptions/00000000-0000-0000-0000-000000000000" + subscription_id: + description: Subscription guid. + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" + state: + description: Subscription state. + returned: always + type: str + sample: "'Enabled' or 'Disabled'" + tags: + description: Tags assigned to resource group. 
+ returned: always + type: dict + sample: { "tag1": "value1", "tag2": "value2" } + tenant_id: + description: Subscription tenant id + returned: always + type: str + sample: "00000000-0000-0000-0000-000000000000" +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +AZURE_OBJECT_CLASS = 'Subscription' + + +class AzureRMSubscriptionInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', aliases=['subscription_name']), + id=dict(type='str'), + tags=dict(type='list', elements='str'), + all=dict(type='bool') + ) + + self.results = dict( + changed=False, + subscriptions=[] + ) + + self.name = None + self.id = None + self.tags = None + self.all = False + + mutually_exclusive = [['name', 'id']] + + super(AzureRMSubscriptionInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + mutually_exclusive=mutually_exclusive, + facts_module=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.id and self.name: + self.fail("Parameter error: cannot search subscriptions by both name and id.") + + result = [] + + if self.id: + result = self.get_item() + else: + result = self.list_items() + + self.results['subscriptions'] = result + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.id)) + item = None + result = [] + + try: + item = self.subscription_client.subscriptions.get(self.id) + except ResourceNotFoundError: + pass + + result = self.to_dict(item) + + return result + + def list_items(self): + self.log('List all items') + try: + response = self.subscription_client.subscriptions.list() + except Exception as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + 
for item in response: + # If the name matches, return result regardless of anything else. + # If name is not defined and either state is Enabled or all is true, and tags match, return result. + if self.name and self.name.lower() == item.display_name.lower(): + results.append(self.to_dict(item)) + elif not self.name and (self.all or item.state == "Enabled"): + results.append(self.to_dict(item)) + + return results + + def to_dict(self, subscription_object): + if self.has_tags(subscription_object.tags, self.tags): + return dict( + display_name=subscription_object.display_name, + fqid=subscription_object.id, + state=subscription_object.state, + subscription_id=subscription_object.subscription_id, + tags=subscription_object.tags, + tenant_id=subscription_object.tenant_id + ) + else: + return dict() + + +def main(): + AzureRMSubscriptionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanager.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanager.py new file mode 100644 index 000000000..422a8df14 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanager.py @@ -0,0 +1,576 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_trafficmanager +version_added: "0.1.2" +short_description: Manage a Traffic Manager profile. +description: + - Create, update and delete a Traffic Manager profile. + +options: + resource_group: + description: + - Name of a resource group where the Traffic Manager profile exists or will be created. + required: true + name: + description: + - Name of the Traffic Manager profile. + required: true + state: + description: + - Assert the state of the Traffic Manager profile. 
Use C(present) to create or update a Traffic Manager profile and C(absent) to delete it. + default: present + choices: + - absent + - present + location: + description: + - Valid azure location. Defaults to 'global'. + profile_status: + description: + - The status of the Traffic Manager profile. + default: Enabled + choices: + - Enabled + - Disabled + traffic_routing_method: + description: + - The traffic routing method of the Traffic Manager profile. + default: Performance + choices: + - Performance + - Priority + - Weighted + - Geographic + dns_config: + description: + - The DNS settings of the Traffic Manager profile. + suboptions: + relative_name: + description: + - The relative DNS name provided by this Traffic Manager profile. + - If no provided, name of the Traffic Manager will be used + ttl: + description: + - The DNS Time-To-Live (TTL), in seconds. + default: 60 + monitor_config: + description: + - The endpoint monitoring settings of the Traffic Manager profile. + suboptions: + protocol: + description: + - The protocol (HTTP, HTTPS or TCP) used to probe for endpoint health. + choices: + - HTTP + - HTTPS + - TCP + port: + description: + - The TCP port used to probe for endpoint health. + path: + description: + - The path relative to the endpoint domain name used to probe for endpoint health. + interval_in_seconds: + description: + - The monitor interval for endpoints in this profile. + timeout_in_seconds: + description: + - The monitor timeout for endpoints in this profile. + tolerated_number_of_failures: + description: + - The number of consecutive failed health check before declaring an endpoint in this profile Degraded after the next failed health check. + default: + protocol: HTTP + port: 80 + path: / + endpoints: + description: + - The list of endpoints in the Traffic Manager profile. + suboptions: + id: + description: + - Fully qualified resource Id for the resource. + name: + description: + - The name of the endpoint. 
+ required: true + type: + description: + - The type of the endpoint. Ex- Microsoft.network/TrafficManagerProfiles/ExternalEndpoints. + required: true + target_resource_id: + description: + - The Azure Resource URI of the of the endpoint. + - Not applicable to endpoints of type 'ExternalEndpoints'. + target: + description: + - The fully-qualified DNS name of the endpoint. + endpoint_status: + description: + - The status of the endpoint. + choices: + - Enabled + - Disabled + weight: + description: + - The weight of this endpoint when using the 'Weighted' traffic routing method. + - Possible values are from 1 to 1000. + priority: + description: + - The priority of this endpoint when using the 'Priority' traffic routing method. + - Possible values are from 1 to 1000, lower values represent higher priority. + - This is an optional parameter. If specified, it must be specified on all endpoints. + - No two endpoints can share the same priority value. + endpoint_location: + description: + - Specifies the location of the external or nested endpoints when using the 'Performance' traffic routing method. + min_child_endpoints: + description: + - The minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available. + - Only applicable to endpoint of type 'NestedEndpoints'. + geo_mapping: + description: + - The list of countries/regions mapped to this endpoint when using the 'Geographic' traffic routing method. 
+ +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - "Hai Cao " + +''' + +EXAMPLES = ''' + - name: Create a Traffic Manager Profile + azure_rm_trafficmanager: + name: tmtest + resource_group: tmt + location: global + profile_status: Enabled + traffic_routing_method: Priority + dns_config: + relative_name: tmtest + ttl: 60 + monitor_config: + protocol: HTTPS + port: 80 + path: '/' + endpoints: + - name: e1 + type: Microsoft.network/TrafficManagerProfiles/ExternalEndpoints + endpoint_location: West US 2 + endpoint_status: Enabled + priority: 2 + target: 1.2.3.4 + weight: 1 + tags: + Environment: Test + + - name: Delete a Traffic Manager Profile + azure_rm_trafficmanager: + state: absent + name: tmtest + resource_group: tmt +''' +RETURN = ''' +state: + description: Current state of the Traffic Manager Profile + returned: always + type: dict + example: + "dns_config": { + "fqdn": "tmtest.trafficmanager.net", + "relative_name": "tmtest", + "ttl": 60 + } + "endpoints": [ + { + "endpoint_location": "West US 2", + "endpoint_monitor_status": "Degraded", + "endpoint_status": "Enabled", + "geo_mapping": null, + "id": "/subscriptions/XXXXXX...XXXXXXXXX/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tmtest/externalEndpoints/e1", + "min_child_endpoints": null, + "name": "e1", + "priority": 2, + "target": "1.2.3.4", + "target_resource_id": null, + "type": "Microsoft.Network/trafficManagerProfiles/externalEndpoints", + "weight": 1 + } + ] + "id": "/subscriptions/XXXXXX...XXXXXXXXX/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tmtest" + "location": "global" + "monitor_config": { + "interval_in_seconds": 30, + "path": "/", + "port": 80, + "profile_monitor_status": "Degraded", + "protocol": "HTTPS", + "timeout_in_seconds": 10, + "tolerated_number_of_failures": 3 + } + "name": "tmtest" + "profile_status": "Enabled" + "tags": { + "Environment": "Test" + } + 
"traffic_routing_method": "Priority" + "type": "Microsoft.Network/trafficManagerProfiles" +''' +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.trafficmanager.models import ( + Profile, Endpoint, DnsConfig, MonitorConfig + ) +except ImportError: + # This is handled in azure_rm_common + pass + + +def traffic_manager_profile_to_dict(tmp): + result = dict( + id=tmp.id, + name=tmp.name, + type=tmp.type, + tags=tmp.tags, + location=tmp.location, + profile_status=tmp.profile_status, + traffic_routing_method=tmp.traffic_routing_method, + dns_config=dict(), + monitor_config=dict(), + endpoints=[] + ) + if tmp.dns_config: + result['dns_config']['relative_name'] = tmp.dns_config.relative_name + result['dns_config']['fqdn'] = tmp.dns_config.fqdn + result['dns_config']['ttl'] = tmp.dns_config.ttl + if tmp.monitor_config: + result['monitor_config']['profile_monitor_status'] = tmp.monitor_config.profile_monitor_status + result['monitor_config']['protocol'] = tmp.monitor_config.protocol + result['monitor_config']['port'] = tmp.monitor_config.port + result['monitor_config']['path'] = tmp.monitor_config.path + result['monitor_config']['interval_in_seconds'] = tmp.monitor_config.interval_in_seconds + result['monitor_config']['timeout_in_seconds'] = tmp.monitor_config.timeout_in_seconds + result['monitor_config']['tolerated_number_of_failures'] = tmp.monitor_config.tolerated_number_of_failures + if tmp.endpoints: + for endpoint in tmp.endpoints: + result['endpoints'].append(dict( + id=endpoint.id, + name=endpoint.name, + type=endpoint.type, + target_resource_id=endpoint.target_resource_id, + target=endpoint.target, + endpoint_status=endpoint.endpoint_status, + weight=endpoint.weight, + priority=endpoint.priority, + endpoint_location=endpoint.endpoint_location, + endpoint_monitor_status=endpoint.endpoint_monitor_status, + 
min_child_endpoints=endpoint.min_child_endpoints, + geo_mapping=endpoint.geo_mapping + )) + return result + + +def create_dns_config_instance(dns_config): + return DnsConfig( + relative_name=dns_config['relative_name'], + ttl=dns_config['ttl'] + ) + + +def create_monitor_config_instance(monitor_config): + return MonitorConfig( + profile_monitor_status=monitor_config['profile_monitor_status'], + protocol=monitor_config['protocol'], + port=monitor_config['port'], + path=monitor_config['path'], + interval_in_seconds=monitor_config['interval_in_seconds'], + timeout_in_seconds=monitor_config['timeout_in_seconds'], + tolerated_number_of_failures=monitor_config['tolerated_number_of_failures'] + ) + + +def create_endpoint_instance(endpoint): + return Endpoint( + id=endpoint['id'], + name=endpoint['name'], + type=endpoint['type'], + target_resource_id=endpoint['target_resource_id'], + target=endpoint['target'], + endpoint_status=endpoint['endpoint_status'], + weight=endpoint['weight'], + priority=endpoint['priority'], + endpoint_location=endpoint['endpoint_location'], + min_child_endpoints=endpoint['min_child_endpoints'], + geo_mapping=endpoint['geo_mapping'] + ) + + +def create_endpoints(endpoints): + return [create_endpoint_instance(endpoint) for endpoint in endpoints] + + +dns_config_spec = dict( + relative_name=dict(type='str'), + ttl=dict(type='int') +) + +monitor_config_spec = dict( + profile_monitor_status=dict(type='str'), + protocol=dict(type='str'), + port=dict(type='int'), + path=dict(type='str'), + interval_in_seconds=dict(type='int'), + timeout_in_seconds=dict(type='int'), + tolerated_number_of_failures=dict(type='int') +) + +endpoint_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + type=dict(type='str'), + target_resource_id=dict(type='str'), + target=dict(type='str'), + endpoint_status=dict(type='str'), + weight=dict(type='int'), + priority=dict(type='int'), + endpoint_location=dict(type='str'), + endpoint_monitor_status=dict(type='str'), + 
min_child_endpoints=dict(type='int'), + geo_mapping=dict(type='list', elements='str') +) + + +class AzureRMTrafficManager(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + profile_status=dict( + type='str', + default='Enabled', + choices=['Enabled', 'Disabled'] + ), + traffic_routing_method=dict( + type='str', + default='Performance', + choices=['Performance', 'Priority', 'Weighted', 'Geographic'] + ), + dns_config=dict( + type='dict', + options=dns_config_spec + ), + monitor_config=dict( + type='dict', + default=dict( + protocol='HTTP', + port=80, + path='/' + ), + options=monitor_config_spec + ), + endpoints=dict( + type='list', + elements='dict', + options=endpoint_spec, + default=[] + ) + ) + + self.resource_group = None + self.name = None + self.state = None + self.tags = None + self.location = None + self.profile_status = None + self.traffic_routing_method = None + self.dns_config = None + self.monitor_config = None + self.endpoints = None + + self.results = dict( + changed=False + ) + + super(AzureRMTrafficManager, self).__init__(derived_arg_spec=self.module_arg_spec, supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + to_be_updated = False + + if not self.dns_config: + self.dns_config = dict( + relative_name=self.name, + ttl=60 + ) + + if not self.location: + self.location = 'global' + + response = self.get_traffic_manager_profile() + + if self.state == 'present': + if not response: + to_be_updated = True + else: + self.results = response + self.log('Results : {0}'.format(response)) + update_tags, response['tags'] = self.update_tags(response['tags']) + + if update_tags: + to_be_updated = True + + 
to_be_updated = to_be_updated or self.check_update(response) + + if to_be_updated: + self.log("Need to Create / Update the Traffic Manager profile") + + if not self.check_mode: + self.results = self.ceate_update_traffic_manager_profile() + self.log("Creation / Update done.") + + self.results['changed'] = True + return self.results + + elif self.state == 'absent' and response: + self.log("Need to delete the Traffic Manager profile") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_traffic_manager_profile() + + self.log("Traffic Manager profile deleted") + + return self.results + + def get_traffic_manager_profile(self): + ''' + Gets the properties of the specified Traffic Manager profile + + :return: deserialized Traffic Manager profile dict + ''' + self.log("Checking if Traffic Manager profile {0} is present".format(self.name)) + try: + response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.name) + self.log("Response : {0}".format(response)) + self.log("Traffic Manager profile : {0} found".format(response.name)) + return traffic_manager_profile_to_dict(response) + except ResourceNotFoundError: + self.log('Did not find the Traffic Manager profile.') + return False + + def delete_traffic_manager_profile(self): + ''' + Deletes the specified Traffic Manager profile in the specified subscription and resource group. + :return: True + ''' + + self.log("Deleting the Traffic Manager profile {0}".format(self.name)) + try: + operation_result = self.traffic_manager_management_client.profiles.delete(self.resource_group, self.name) + return True + except Exception as e: + self.log('Error attempting to delete the Traffic Manager profile.') + self.fail("Error deleting the Traffic Manager profile: {0}".format(e.message)) + return False + + def ceate_update_traffic_manager_profile(self): + ''' + Creates or updates a Traffic Manager profile. 
+ + :return: deserialized Traffic Manager profile state dictionary + ''' + self.log("Creating / Updating the Traffic Manager profile {0}".format(self.name)) + + parameters = Profile( + tags=self.tags, + location=self.location, + profile_status=self.profile_status, + traffic_routing_method=self.traffic_routing_method, + dns_config=create_dns_config_instance(self.dns_config) if self.dns_config else None, + monitor_config=create_monitor_config_instance(self.monitor_config) if self.monitor_config else None, + endpoints=create_endpoints(self.endpoints) + ) + try: + response = self.traffic_manager_management_client.profiles.create_or_update(self.resource_group, self.name, parameters) + return traffic_manager_profile_to_dict(response) + except Exception as exc: + self.log('Error attempting to create the Traffic Manager.') + self.fail("Error creating the Traffic Manager: {0}".format(exc.message)) + + def check_update(self, response): + if response['location'] != self.location: + self.log("Location Diff - Origin {0} / Update {1}".format(response['location'], self.location)) + return True + + if response['profile_status'] != self.profile_status: + self.log("Profile Status Diff - Origin {0} / Update {1}".format(response['profile_status'], self.profile_status)) + return True + + if response['traffic_routing_method'] != self.traffic_routing_method: + self.log("Traffic Routing Method Diff - Origin {0} / Update {1}".format(response['traffic_routing_method'], self.traffic_routing_method)) + return True + + if (response['dns_config']['relative_name'] != self.dns_config['relative_name'] or response['dns_config']['ttl'] != self.dns_config['ttl']): + self.log("DNS Config Diff - Origin {0} / Update {1}".format(response['dns_config'], self.dns_config)) + return True + + for k, v in self.monitor_config.items(): + if v: + if str(v).lower() != str(response['monitor_config'][k]).lower(): + self.log("Monitor Config Diff - Origin {0} / Update {1}".format(response['monitor_config'], 
self.monitor_config)) + return True + + if len(response['endpoints']) != len(self.endpoints): + self.log("Endpoints Diff - Origin {0} / Update {1}".format(response['endpoints'], self.endpoints)) + return True + else: + for e1, e2 in zip(sorted(self.endpoints, key=lambda k: k['name']), sorted(response['endpoints'], key=lambda k: k['name'])): + for k, v in e1.items(): + if v: + if str(v).lower() != str(e2[k]).lower(): + self.log("Endpoints Diff - Origin {0} / Update {1}".format(response['endpoints'], self.endpoints)) + return True + return False + + +def main(): + """Main execution""" + AzureRMTrafficManager() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint.py new file mode 100644 index 000000000..df87ed93f --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint.py @@ -0,0 +1,371 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, , Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_trafficmanagerendpoint +version_added: "0.1.2" +short_description: Manage Azure Traffic Manager endpoint +description: + - Create, update and delete Azure Traffic Manager endpoint. + +options: + resource_group: + description: + - Name of a resource group where the Traffic Manager endpoint exists or will be created. + type: str + required: true + name: + description: + - The name of the endpoint. + type: str + required: true + profile_name: + description: + - Name of Traffic Manager profile where this endpoints attaches to. + type: str + required: true + type: + description: + - The type of the endpoint. 
+ required: true + choices: + - azure_endpoints + - external_endpoints + - nested_endpoints + target_resource_id: + description: + - The Azure Resource URI of the of the endpoint. + - Not applicable to endpoints of I(type=external_endpoints). + type: str + target: + description: + - The fully-qualified DNS name of the endpoint. + type: str + enabled: + description: + - The status of the endpoint. + type: bool + default: true + weight: + description: + - The weight of this endpoint when traffic manager profile has routing_method of C(weighted). + - Possible values are from 1 to 1000. + type: int + priority: + description: + - The priority of this endpoint when traffic manager profile has routing_method of I(priority). + - Possible values are from 1 to 1000, lower values represent higher priority. + - This is an optional parameter. If specified, it must be specified on all endpoints. + - No two endpoints can share the same priority value. + type: int + location: + description: + - Specifies the location of the external or nested endpoints when using the 'Performance' traffic routing method. + type: str + min_child_endpoints: + description: + - The minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available. + - Only applicable to endpoint of I(type=nested_endpoints). + type: int + geo_mapping: + description: + - The list of countries/regions mapped to this endpoint when traffic manager profile has routing_method of C(geographic). + type: list + elements: str + state: + description: + - Assert the state of the Traffic Manager endpoint. Use C(present) to create or update a Traffic Manager endpoint and C(absent) to delete it. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Hai Cao (@caohai) + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: create a endpoint for a traffic manager profile + azure_rm_trafficmanagerendpoint: + resource_group: testresourcegroup + profile_name: myprofilename + name: testendpoint1 + type: external_endpoints + location: westus + priority: 2 + weight: 1 + target: 1.2.3.4 +''' + +RETURN = ''' +id: + description: + - The ID of the traffic manager endpoint. + returned: when traffic manager endpoint exists + type: str + example: + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/trafficManagerProfiles/testProfil + e/externalEndpoints/testendpoint" +''' +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name +from ansible.module_utils.common.dict_transformations import _snake_to_camel + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.trafficmanager.models import ( + Endpoint, DnsConfig, MonitorConfig + ) +except ImportError: + # This is handled in azure_rm_common + pass + + +def traffic_manager_endpoint_to_dict(endpoint): + return dict( + id=endpoint.id, + name=endpoint.name, + type=endpoint.type, + target_resource_id=endpoint.target_resource_id, + target=endpoint.target, + status=endpoint.endpoint_status, + weight=endpoint.weight, + priority=endpoint.priority, + location=endpoint.endpoint_location, + monitor_status=endpoint.endpoint_monitor_status, + min_child_endpoints=endpoint.min_child_endpoints, + geo_mapping=endpoint.geo_mapping + ) + + +class Actions: + NoAction, CreateOrUpdate, Delete = range(3) + + +class AzureRMTrafficManagerEndpoint(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + 
required=True + ), + profile_name=dict( + type='str', + required=True + ), + type=dict( + type='str', + choices=['azure_endpoints', 'external_endpoints', 'nested_endpoints'], + required=True + ), + target=dict(type='str'), + target_resource_id=dict(type='str'), + enabled=dict(type='bool', default=True), + weight=dict(type='int'), + priority=dict(type='int'), + location=dict(type='str'), + min_child_endpoints=dict(type='int'), + geo_mapping=dict(type='list', elements='str'), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + ) + + self.resource_group = None + self.name = None + self.state = None + + self.profile_name = None + self.type = None + self.target_resource_id = None + self.enabled = None + self.weight = None + self.priority = None + self.location = None + self.min_child_endpoints = None + self.geo_mapping = None + self.endpoint_status = 'Enabled' + + self.action = Actions.NoAction + + self.results = dict( + changed=False + ) + + super(AzureRMTrafficManagerEndpoint, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + if self.type: + self.type = _snake_to_camel(self.type) + + to_be_updated = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + if self.enabled is not None and self.enabled is False: + self.endpoint_status = 'Disabled' + + response = self.get_traffic_manager_endpoint() + + if response: + self.log('Results : {0}'.format(response)) + self.results['id'] = response['id'] + if self.state == 'present': + # check update + to_be_update = self.check_update(response) + if to_be_update: + self.action = Actions.CreateOrUpdate + + elif self.state == 'absent': + # delete + self.action = Actions.Delete + else: + if self.state == 'present': + self.action = 
Actions.CreateOrUpdate + elif self.state == 'absent': + # delete when no exists + self.fail("Traffic Manager endpoint {0} not exists.".format(self.name)) + + if self.action == Actions.CreateOrUpdate: + self.results['changed'] = True + if self.check_mode: + return self.results + + response = self.create_update_traffic_manager_endpoint() + self.results['id'] = response['id'] + + if self.action == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.delete_traffic_manager_endpoint() + + return self.results + + def get_traffic_manager_endpoint(self): + ''' + Gets the properties of the specified Traffic Manager endpoint + + :return: deserialized Traffic Manager endpoint dict + ''' + self.log("Checking if Traffic Manager endpoint {0} is present".format(self.name)) + try: + response = self.traffic_manager_management_client.endpoints.get(self.resource_group, self.profile_name, self.type, self.name) + self.log("Response : {0}".format(response)) + return traffic_manager_endpoint_to_dict(response) + except ResourceNotFoundError: + self.log('Did not find the Traffic Manager endpoint.') + return False + + def delete_traffic_manager_endpoint(self): + ''' + Deletes the specified Traffic Manager endpoint. + :return: True + ''' + + self.log("Deleting the Traffic Manager endpoint {0}".format(self.name)) + try: + operation_result = self.traffic_manager_management_client.endpoints.delete(self.resource_group, self.profile_name, self.type, self.name) + return True + except Exception as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error deleting the Traffic Manager endpoint {0}, request id {1} - {2}".format(self.name, request_id, str(exc))) + return False + + def create_update_traffic_manager_endpoint(self): + ''' + Creates or updates a Traffic Manager endpoint. 
+ + :return: deserialized Traffic Manager endpoint state dictionary + ''' + self.log("Creating / Updating the Traffic Manager endpoint {0}".format(self.name)) + + parameters = Endpoint(target_resource_id=self.target_resource_id, + target=self.target, + endpoint_status=self.endpoint_status, + weight=self.weight, + priority=self.priority, + endpoint_location=self.location, + min_child_endpoints=self.min_child_endpoints, + geo_mapping=self.geo_mapping) + + try: + response = self.traffic_manager_management_client.endpoints.create_or_update(self.resource_group, + self.profile_name, + self.type, + self.name, + parameters) + return traffic_manager_endpoint_to_dict(response) + except Exception as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error creating the Traffic Manager endpoint {0}, request id {1} - {2}".format(self.name, request_id, str(exc))) + + def check_update(self, response): + if self.endpoint_status is not None and response['status'].lower() != self.endpoint_status.lower(): + self.log("Status Diff - Origin {0} / Update {1}".format(response['status'], self.endpoint_status)) + return True + + if self.type and response['type'].lower() != "Microsoft.network/TrafficManagerProfiles/{0}".format(self.type).lower(): + self.log("Type Diff - Origin {0} / Update {1}".format(response['type'], self.type)) + return True + + if self.target_resource_id and response['target_resource_id'] != self.target_resource_id: + self.log("target_resource_id Diff - Origin {0} / Update {1}".format(response['target_resource_id'], self.target_resource_id)) + return True + + if self.target and response['target'] != self.target: + self.log("target Diff - Origin {0} / Update {1}".format(response['target'], self.target)) + return True + + if self.weight and int(response['weight']) != self.weight: + self.log("weight Diff - Origin {0} / Update {1}".format(response['weight'], self.weight)) + return True + + if self.priority and int(response['priority']) != self.priority: 
+ self.log("priority Diff - Origin {0} / Update {1}".format(response['priority'], self.priority)) + return True + + if self.min_child_endpoints and int(response['min_child_endpoints']) != self.min_child_endpoints: + self.log("min_child_endpoints Diff - Origin {0} / Update {1}".format(response['min_child_endpoints'], self.min_child_endpoints)) + return True + + if self.geo_mapping and response['geo_mapping'] != self.geo_mapping: + self.log("geo_mapping Diff - Origin {0} / Update {1}".format(response['geo_mapping'], self.geo_mapping)) + return True + + return False + + +def main(): + """Main execution""" + AzureRMTrafficManagerEndpoint() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint_info.py new file mode 100644 index 000000000..94bd2ac87 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerendpoint_info.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, , Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_trafficmanagerendpoint_info + +version_added: "0.1.2" + +short_description: Get Azure Traffic Manager endpoint facts + +description: + - Get facts for a specific Traffic Manager endpoints or all endpoints in a Traffic Manager profile. + +options: + name: + description: + - Limit results to a specific Traffic Manager endpoint. + resource_group: + description: + - The resource group to search for the desired Traffic Manager profile. + required: True + profile_name: + description: + - Name of Traffic Manager Profile. + required: True + type: + description: + - Type of endpoint. 
+ choices: + - azure_endpoints + - external_endpoints + - nested_endpoints + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Hai Cao (@caohai) + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get endpoints facts of a Traffic Manager profile + azure_rm_trafficmanagerendpoint_info: + resource_group: myResourceGroup + profile_name: Testing + + - name: Get specific endpoint of a Traffic Manager profile + azure_rm_trafficmanager_info: + resource_group: myResourceGroup + profile_name: Testing + name: test_external_endpoint + +''' + +RETURN = ''' +endpoints: + description: + - List of Traffic Manager endpoints. + returned: always + type: complex + contains: + resource_group: + description: + - Name of a resource group. + returned: always + type: str + sample: myResourceGroup + name: + description: + - Name of the Traffic Manager endpoint. + returned: always + type: str + sample: testendpoint + type: + description: + - The type of the endpoint. + returned: always + type: str + sample: external_endpoints + target_resource_id: + description: + - The Azure Resource URI of the of the endpoint. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ClassicCompute/domainNames/vscjavaci + target: + description: + - The fully-qualified DNS name of the endpoint. + returned: always + type: str + sample: 8.8.8.8 + enabled: + description: + - The status of the endpoint. + returned: always + type: str + sample: Enabled + weight: + description: + - The weight of this endpoint when using the 'Weighted' traffic routing method. + returned: always + type: int + sample: 10 + priority: + description: + - The priority of this endpoint when using the 'Priority' traffic routing method. + returned: always + type: str + sample: 3 + location: + description: + - The location of the external or nested endpoints when using the 'Performance' traffic routing method. 
+ returned: always + type: str + sample: East US + min_child_endpoints: + description: + - The minimum number of endpoints that must be available in the child profile to make the parent profile available. + returned: always + type: int + sample: 3 + geo_mapping: + description: + - The list of countries/regions mapped to this endpoint when using the 'Geographic' traffic routing method. + returned: always + type: list + sample: [ + "GEO-NA", + "GEO-AS" + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import ( + _snake_to_camel, _camel_to_snake +) + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # handled in azure_rm_common + pass + +import re + +AZURE_OBJECT_CLASS = 'TrafficManagerEndpoints' + + +def serialize_endpoint(endpoint, resource_group): + result = dict( + id=endpoint.id, + name=endpoint.name, + target_resource_id=endpoint.target_resource_id, + target=endpoint.target, + enabled=True, + weight=endpoint.weight, + priority=endpoint.priority, + location=endpoint.endpoint_location, + min_child_endpoints=endpoint.min_child_endpoints, + geo_mapping=endpoint.geo_mapping, + monitor_status=endpoint.endpoint_monitor_status, + resource_group=resource_group + ) + + if endpoint.endpoint_status and endpoint.endpoint_status == 'Disabled': + result['enabled'] = False + + if endpoint.type: + result['type'] = _camel_to_snake(endpoint.type.split("/")[-1]) + + return result + + +class AzureRMTrafficManagerEndpointInfo(AzureRMModuleBase): + """Utility class to get Azure Traffic Manager Endpoint facts""" + + def __init__(self): + + self.module_args = dict( + profile_name=dict( + type='str', + required=True), + resource_group=dict( + type='str', + required=True), + name=dict(type='str'), + type=dict( + type='str', + choices=[ + 'azure_endpoints', + 'external_endpoints', + 'nested_endpoints' + ]) + ) + + self.results = dict( 
+ changed=False, + endpoints=[] + ) + + self.profile_name = None + self.name = None + self.resource_group = None + self.type = None + + super(AzureRMTrafficManagerEndpointInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_trafficmanagerendpoint_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_trafficmanagerendpoint_facts' module has been renamed to 'azure_rm_trafficmanagerendpoint_info'", + version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + if self.type: + self.type = _snake_to_camel(self.type) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['endpoints'] = self.get_item() + elif self.type: + self.results['endpoints'] = self.list_by_type() + else: + self.results['endpoints'] = self.list_by_profile() + + return self.results + + def get_item(self): + """Get a single Azure Traffic Manager endpoint""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.traffic_manager_management_client.endpoints.get( + self.resource_group, self.profile_name, self.type, self.name) + except ResourceNotFoundError: + pass + + if item: + if (self.type and self.type == item.type) or self.type is None: + result = [self.serialize_tm(item)] + + return result + + def list_by_profile(self): + """Get all Azure Traffic Manager endpoints of a profile""" + + self.log('List all endpoints belongs to a Traffic Manager profile') + + try: + response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.profile_name) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + if response and response.endpoints: + for endpoint in response.endpoints: + 
results.append(serialize_endpoint(endpoint, self.resource_group)) + + return results + + def list_by_type(self): + """Get all Azure Traffic Managers endpoints of a profile by type""" + self.log('List all Traffic Manager endpoints of a profile by type') + try: + response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.profile_name) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if item.endpoints: + for endpoint in item.endpoints: + if endpoint.type == self.type: + results.append(serialize_endpoint(endpoint, self.resource_group)) + return results + + +def main(): + """Main module execution code path""" + + AzureRMTrafficManagerEndpointInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile.py new file mode 100644 index 000000000..b529666d3 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile.py @@ -0,0 +1,460 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_trafficmanagerprofile +version_added: "0.1.2" +short_description: Manage Azure Traffic Manager profile +description: + - Create, update and delete a Traffic Manager profile. + +options: + resource_group: + description: + - Name of a resource group where the Traffic Manager profile exists or will be created. + required: true + name: + description: + - Name of the Traffic Manager profile. + required: true + state: + description: + - Assert the state of the Traffic Manager profile. 
Use C(present) to create or update a Traffic Manager profile and C(absent) to delete it. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to C(global) because in default public Azure cloud, Traffic Manager profile can only be deployed globally. + - Reference U(https://docs.microsoft.com/en-us/azure/traffic-manager/quickstart-create-traffic-manager-profile#create-a-traffic-manager-profile). + default: global + profile_status: + description: + - The status of the Traffic Manager profile. + default: enabled + choices: + - enabled + - disabled + routing_method: + description: + - The traffic routing method of the Traffic Manager profile. + default: performance + choices: + - performance + - priority + - weighted + - geographic + dns_config: + description: + - The DNS settings of the Traffic Manager profile. + suboptions: + relative_name: + description: + - The relative DNS name provided by this Traffic Manager profile. + - If not provided, name of the Traffic Manager will be used. + ttl: + description: + - The DNS Time-To-Live (TTL), in seconds. + type: int + default: 60 + monitor_config: + description: + - The endpoint monitoring settings of the Traffic Manager profile. + suboptions: + protocol: + description: + - The protocol C(HTTP), C(HTTPS) or C(TCP) used to probe for endpoint health. + choices: + - HTTP + - HTTPS + - TCP + port: + description: + - The TCP port used to probe for endpoint health. + path: + description: + - The path relative to the endpoint domain name used to probe for endpoint health. + interval: + description: + - The monitor interval for endpoints in this profile in seconds. + type: int + timeout: + description: + - The monitor timeout for endpoints in this profile in seconds. + type: int + tolerated_failures: + description: + - The number of consecutive failed health check before declaring an endpoint in this profile Degraded after the next failed health check. 
+ default: + protocol: HTTP + port: 80 + path: / + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Hai Cao (@caohai) + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a Traffic Manager Profile + azure_rm_trafficmanagerprofile: + name: tmtest + resource_group: myResourceGroup + location: global + profile_status: enabled + routing_method: priority + dns_config: + relative_name: tmtest + ttl: 60 + monitor_config: + protocol: HTTPS + port: 80 + path: '/' + tags: + Environment: Test + + - name: Delete a Traffic Manager Profile + azure_rm_trafficmanagerprofile: + state: absent + name: tmtest + resource_group: myResourceGroup +''' +RETURN = ''' +id: + description: + - The ID of the traffic manager profile. + returned: when traffic manager profile exists + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tmtest" +endpoints: + description: + - List of endpoint IDs attached to the profile. 
+ returned: when traffic manager endpoints exists + type: list + sample: [ + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tm049b1ae293/exter + nalEndpoints/e2", + "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tm049b1ae293/exter + nalEndpoints/e1" + ] +''' +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.mgmt.trafficmanager.models import ( + Profile, Endpoint, DnsConfig, MonitorConfig + ) +except ImportError: + # This is handled in azure_rm_common + pass + + +def shorten_traffic_manager_dict(tmd): + return dict( + id=tmd['id'], + endpoints=[endpoint['id'] for endpoint in tmd['endpoints']] if tmd['endpoints'] else [] + ) + + +def traffic_manager_profile_to_dict(tmp): + result = dict( + id=tmp.id, + name=tmp.name, + type=tmp.type, + tags=tmp.tags, + location=tmp.location, + profile_status=tmp.profile_status, + routing_method=tmp.traffic_routing_method, + dns_config=dict(), + monitor_config=dict(), + endpoints=[] + ) + if tmp.dns_config: + result['dns_config']['relative_name'] = tmp.dns_config.relative_name + result['dns_config']['fqdn'] = tmp.dns_config.fqdn + result['dns_config']['ttl'] = tmp.dns_config.ttl + if tmp.monitor_config: + result['monitor_config']['profile_monitor_status'] = tmp.monitor_config.profile_monitor_status + result['monitor_config']['protocol'] = tmp.monitor_config.protocol + result['monitor_config']['port'] = tmp.monitor_config.port + result['monitor_config']['path'] = tmp.monitor_config.path + result['monitor_config']['interval'] = tmp.monitor_config.interval_in_seconds + result['monitor_config']['timeout'] = tmp.monitor_config.timeout_in_seconds + result['monitor_config']['tolerated_failures'] = 
tmp.monitor_config.tolerated_number_of_failures + if tmp.endpoints: + for endpoint in tmp.endpoints: + result['endpoints'].append(dict( + id=endpoint.id, + name=endpoint.name, + type=endpoint.type, + target_resource_id=endpoint.target_resource_id, + target=endpoint.target, + endpoint_status=endpoint.endpoint_status, + weight=endpoint.weight, + priority=endpoint.priority, + endpoint_location=endpoint.endpoint_location, + endpoint_monitor_status=endpoint.endpoint_monitor_status, + min_child_endpoints=endpoint.min_child_endpoints, + geo_mapping=endpoint.geo_mapping + )) + return result + + +def create_dns_config_instance(dns_config): + return DnsConfig( + relative_name=dns_config['relative_name'], + ttl=dns_config['ttl'] + ) + + +def create_monitor_config_instance(monitor_config): + return MonitorConfig( + profile_monitor_status=monitor_config['profile_monitor_status'], + protocol=monitor_config['protocol'], + port=monitor_config['port'], + path=monitor_config['path'], + interval_in_seconds=monitor_config['interval'], + timeout_in_seconds=monitor_config['timeout'], + tolerated_number_of_failures=monitor_config['tolerated_failures'] + ) + + +dns_config_spec = dict( + relative_name=dict(type='str'), + ttl=dict(type='int') +) + +monitor_config_spec = dict( + profile_monitor_status=dict(type='str'), + protocol=dict(type='str'), + port=dict(type='int'), + path=dict(type='str'), + interval=dict(type='int'), + timeout=dict(type='int'), + tolerated_failures=dict(type='int') +) + + +class AzureRMTrafficManagerProfile(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str', + default='global' + ), + profile_status=dict( + type='str', + default='enabled', + choices=['enabled', 'disabled'] + ), + routing_method=dict( + type='str', + 
default='performance', + choices=['performance', 'priority', 'weighted', 'geographic'] + ), + dns_config=dict( + type='dict', + options=dns_config_spec + ), + monitor_config=dict( + type='dict', + default=dict( + protocol='HTTP', + port=80, + path='/' + ), + options=monitor_config_spec + ), + ) + + self.resource_group = None + self.name = None + self.state = None + self.tags = None + self.location = None + self.profile_status = None + self.routing_method = None + self.dns_config = None + self.monitor_config = None + self.endpoints_copy = None + + self.results = dict( + changed=False + ) + + super(AzureRMTrafficManagerProfile, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + to_be_updated = False + + if not self.dns_config: + self.dns_config = dict( + relative_name=self.name, + ttl=60 + ) + + if not self.location: + self.location = 'global' + + response = self.get_traffic_manager_profile() + + if self.state == 'present': + if not response: + to_be_updated = True + else: + self.results = shorten_traffic_manager_dict(response) + self.log('Results : {0}'.format(response)) + update_tags, response['tags'] = self.update_tags(response['tags']) + + if update_tags: + to_be_updated = True + + to_be_updated = to_be_updated or self.check_update(response) + + if to_be_updated: + self.log("Need to Create / Update the Traffic Manager profile") + + if not self.check_mode: + self.results = shorten_traffic_manager_dict(self.create_update_traffic_manager_profile()) + self.log("Creation / Update done.") + + self.results['changed'] = True + return self.results + + elif self.state == 'absent' and response: + self.log("Need to delete the Traffic Manager profile") + self.results = shorten_traffic_manager_dict(response) + self.results['changed'] = True + + if self.check_mode: + return self.results + + 
self.delete_traffic_manager_profile() + + self.log("Traffic Manager profile deleted") + + return self.results + + def get_traffic_manager_profile(self): + ''' + Gets the properties of the specified Traffic Manager profile + + :return: deserialized Traffic Manager profile dict + ''' + self.log("Checking if Traffic Manager profile {0} is present".format(self.name)) + try: + response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.name) + self.log("Response : {0}".format(response)) + self.log("Traffic Manager profile : {0} found".format(response.name)) + self.endpoints_copy = response.endpoints if response and response.endpoints else None + return traffic_manager_profile_to_dict(response) + except ResourceNotFoundError: + self.log('Did not find the Traffic Manager profile.') + return False + + def delete_traffic_manager_profile(self): + ''' + Deletes the specified Traffic Manager profile in the specified subscription and resource group. + :return: True + ''' + + self.log("Deleting the Traffic Manager profile {0}".format(self.name)) + try: + operation_result = self.traffic_manager_management_client.profiles.delete(self.resource_group, self.name) + return True + except Exception as e: + self.log('Error attempting to delete the Traffic Manager profile.') + self.fail("Error deleting the Traffic Manager profile: {0}".format(e.message)) + return False + + def create_update_traffic_manager_profile(self): + ''' + Creates or updates a Traffic Manager profile. 
+ + :return: deserialized Traffic Manager profile state dictionary + ''' + self.log("Creating / Updating the Traffic Manager profile {0}".format(self.name)) + + parameters = Profile( + tags=self.tags, + location=self.location, + profile_status=self.profile_status, + traffic_routing_method=self.routing_method, + dns_config=create_dns_config_instance(self.dns_config) if self.dns_config else None, + monitor_config=create_monitor_config_instance(self.monitor_config) if self.monitor_config else None, + endpoints=self.endpoints_copy + ) + try: + response = self.traffic_manager_management_client.profiles.create_or_update(self.resource_group, self.name, parameters) + return traffic_manager_profile_to_dict(response) + except Exception as exc: + self.log('Error attempting to create the Traffic Manager.') + self.fail("Error creating the Traffic Manager: {0}".format(exc.message)) + + def check_update(self, response): + if self.location and normalize_location_name(response['location']) != normalize_location_name(self.location): + self.log("Location Diff - Origin {0} / Update {1}".format(response['location'], self.location)) + return True + + if self.profile_status and response['profile_status'].lower() != self.profile_status: + self.log("Profile Status Diff - Origin {0} / Update {1}".format(response['profile_status'], self.profile_status)) + return True + + if self.routing_method and response['routing_method'].lower() != self.routing_method: + self.log("Traffic Routing Method Diff - Origin {0} / Update {1}".format(response['routing_method'], self.routing_method)) + return True + + if self.dns_config and \ + (response['dns_config']['relative_name'] != self.dns_config['relative_name'] or response['dns_config']['ttl'] != self.dns_config['ttl']): + self.log("DNS Config Diff - Origin {0} / Update {1}".format(response['dns_config'], self.dns_config)) + return True + + for k, v in self.monitor_config.items(): + if v: + if str(v).lower() != str(response['monitor_config'][k]).lower(): + 
self.log("Monitor Config Diff - Origin {0} / Update {1}".format(response['monitor_config'], self.monitor_config)) + return True + return False + + +def main(): + """Main execution""" + AzureRMTrafficManagerProfile() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile_info.py new file mode 100644 index 000000000..43a3492de --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_trafficmanagerprofile_info.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Hai Cao, , Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_trafficmanagerprofile_info + +version_added: "0.1.2" + +short_description: Get Azure Traffic Manager profile facts + +description: + - Get facts for a Azure specific Traffic Manager profile or all Traffic Manager profiles. + +options: + name: + description: + - Limit results to a specific Traffic Manager profile. + resource_group: + description: + - The resource group to search for the desired Traffic Manager profile. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Hai Cao (@caohai) + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get facts for one Traffic Manager profile + azure_rm_trafficmanager_info: + name: Testing + resource_group: myResourceGroup + + - name: Get facts for all Traffic Manager profiles + azure_rm_trafficmanager_info: + + - name: Get facts by tags + azure_rm_trafficmanager_info: + tags: + - Environment:Test +''' + +RETURN = ''' +tms: + description: + - List of Traffic Manager profiles. + returned: always + type: complex + contains: + resource_group: + description: + - Name of a resource group where the Traffic Manager profile exists. + returned: always + type: str + sample: testGroup + name: + description: + - Name of the Traffic Manager profile. + returned: always + type: str + sample: testTm + state: + description: + - The state of the Traffic Manager profile. + returned: always + type: str + sample: present + location: + description: + - Location of the Traffic Manager profile. + returned: always + type: str + sample: global + profile_status: + description: + - The status of the Traffic Manager profile. + returned: always + type: str + sample: Enabled + routing_method: + description: + - The traffic routing method of the Traffic Manager profile. + returned: always + type: str + sample: performance + dns_config: + description: + - The DNS settings of the Traffic Manager profile. + returned: always + type: complex + contains: + relative_name: + description: + - The relative DNS name provided by the Traffic Manager profile. + returned: always + type: str + sample: testTm + fqdn: + description: + - The fully-qualified domain name(FQDN) of the Traffic Manager profile. + returned: always + type: str + sample: testTm.trafficmanager.net + ttl: + description: + - The DNS Time-To-Live(TTL), in seconds. 
+ returned: always + type: int + sample: 60 + monitor_config: + description: + - The endpoint monitoring settings of the Traffic Manager profile. + returned: always + type: complex + contains: + protocol: + description: + - The protocol C(HTTP), C(HTTPS) or C(TCP) used to probe for endpoint health. + returned: always + type: str + sample: HTTP + port: + description: + - The TCP port used to probe for endpoint health. + returned: always + type: int + sample: 80 + path: + description: + - The path relative to the endpoint domain name used to probe for endpoint health. + returned: always + type: str + sample: / + interval: + description: + - The monitor interval for endpoints in this profile in seconds. + returned: always + type: int + sample: 10 + timeout: + description: + - The monitor timeout for endpoints in this profile in seconds. + returned: always + type: int + sample: 30 + tolerated_failures: + description: + - The number of consecutive failed health check before declaring an endpoint Degraded after the next failed health check. + returned: always + type: int + sample: 3 + endpoints: + description: + - The list of endpoints in the Traffic Manager profile. + returned: always + type: complex + contains: + id: + description: + - Fully qualified resource ID for the resource. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/trafficMan + agerProfiles/tmtest/externalEndpoints/e1" + name: + description: + - The name of the endpoint. + returned: always + type: str + sample: e1 + type: + description: + - The type of the endpoint. + returned: always + type: str + sample: external_endpoints + target_resource_id: + description: + - The Azure Resource URI of the of the endpoint. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ClassicCompute/dom + ainNames/vscjavaci" + target: + description: + - The fully-qualified DNS name of the endpoint. + returned: always + type: str + sample: 8.8.8.8 + status: + description: + - The status of the endpoint. + returned: always + type: str + sample: Enabled + weight: + description: + - The weight of this endpoint when the profile has I(routing_method=weighted). + returned: always + type: int + sample: 10 + priority: + description: + - The priority of this endpoint when the profile has I(routing_method=priority). + returned: always + type: str + sample: 3 + location: + description: + - The location of endpoints when I(type=external_endpoints) or I(type=nested_endpoints), and profile I(routing_method=performance). + returned: always + type: str + sample: East US + min_child_endpoints: + description: + - The minimum number of endpoints that must be available in the child profile to make the parent profile available. + returned: always + type: int + sample: 3 + geo_mapping: + description: + - The list of countries/regions mapped to this endpoint when the profile has routing_method C(geographic). 
+ returned: always + type: list + sample: [ + "GEO-NA", + "GEO-AS" + ] +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.common.dict_transformations import _camel_to_snake + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # handled in azure_rm_common + pass + +import re + +AZURE_OBJECT_CLASS = 'trafficManagerProfiles' + + +def serialize_endpoint(endpoint): + result = dict( + id=endpoint.id, + name=endpoint.name, + target_resource_id=endpoint.target_resource_id, + target=endpoint.target, + status=endpoint.endpoint_status, + weight=endpoint.weight, + priority=endpoint.priority, + location=endpoint.endpoint_location, + min_child_endpoints=endpoint.min_child_endpoints, + geo_mapping=endpoint.geo_mapping, + ) + + if endpoint.type: + result['type'] = _camel_to_snake(endpoint.type.split("/")[-1]) + + return result + + +class AzureRMTrafficManagerProfileInfo(AzureRMModuleBase): + """Utility class to get Azure Traffic Manager profile facts""" + + def __init__(self): + + self.module_args = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + tms=[] + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMTrafficManagerProfileInfo, self).__init__( + derived_arg_spec=self.module_args, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_trafficmanagerprofile_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_trafficmanagerprofile_facts' module has been renamed to 'azure_rm_trafficmanagerprofile_info'", + version=(2.9, )) + + for key in self.module_args: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering 
by name.") + + if self.name: + self.results['tms'] = self.get_item() + elif self.resource_group: + self.results['tms'] = self.list_resource_group() + else: + self.results['tms'] = self.list_all() + + return self.results + + def get_item(self): + """Get a single Azure Traffic Manager profile""" + + self.log('Get properties for {0}'.format(self.name)) + + item = None + result = [] + + try: + item = self.traffic_manager_management_client.profiles.get( + self.resource_group, self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_tm(item)] + + return result + + def list_resource_group(self): + """Get all Azure Traffic Managers profiles within a resource group""" + + self.log('List all Azure Traffic Managers within a resource group') + + try: + response = self.traffic_manager_management_client.profiles.list_by_resource_group( + self.resource_group) + except Exception as exc: + self.fail('Failed to list all items - {0}'.format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_tm(item)) + + return results + + def list_all(self): + """Get all Azure Traffic Manager profiles within a subscription""" + self.log('List all Traffic Manager profiles within a subscription') + try: + response = self.traffic_manager_management_client.profiles.list_by_subscription() + except Exception as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_tm(item)) + return results + + def serialize_tm(self, tm): + ''' + Convert a Traffic Manager profile object to dict. 
+ :param tm: Traffic Manager profile object + :return: dict + ''' + result = self.serialize_obj(tm, AZURE_OBJECT_CLASS) + + new_result = {} + new_result['id'] = tm.id + new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', result['id'])) + new_result['name'] = tm.name + new_result['state'] = 'present' + new_result['location'] = tm.location + new_result['profile_status'] = tm.profile_status + new_result['routing_method'] = tm.traffic_routing_method.lower() + new_result['dns_config'] = dict( + relative_name=tm.dns_config.relative_name, + fqdn=tm.dns_config.fqdn, + ttl=tm.dns_config.ttl + ) + new_result['monitor_config'] = dict( + profile_monitor_status=tm.monitor_config.profile_monitor_status, + protocol=tm.monitor_config.protocol, + port=tm.monitor_config.port, + path=tm.monitor_config.path, + interval=tm.monitor_config.interval_in_seconds, + timeout=tm.monitor_config.timeout_in_seconds, + tolerated_failures=tm.monitor_config.tolerated_number_of_failures + ) + new_result['endpoints'] = [serialize_endpoint(endpoint) for endpoint in tm.endpoints] + new_result['tags'] = tm.tags + return new_result + + +def main(): + """Main module execution code path""" + + AzureRMTrafficManagerProfileInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub.py new file mode 100644 index 000000000..d29abf3be --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub.py @@ -0,0 +1,767 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 XiuxiSun, (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualhub +version_added: '1.10.0' +short_description: Manage Azure VirtualHub 
instance +description: + - Create, update and delete instance of Azure VirtualHub. +options: + resource_group: + description: + - The resource group name of the VirtualHub. + required: true + type: str + location: + description: + - The location of the VirtualHub. + type: str + name: + description: + - The name of the VirtualHub. + required: true + type: str + virtual_wan: + description: + - The VirtualWAN to which the VirtualHub belongs. + type: dict + suboptions: + id: + description: + - Resource ID. + type: str + vpn_gateway: + description: + - The VpnGateway associated with this VirtualHub. + type: dict + suboptions: + id: + description: + - Resource ID. + type: str + p2_s_vpn_gateway: + description: + - The P2SVpnGateway associated with this VirtualHub. + type: dict + suboptions: + id: + description: + - Resource ID. + type: str + express_route_gateway: + description: + - The expressRouteGateway associated with this VirtualHub. + type: dict + suboptions: + id: + description: + - Resource ID. + type: str + azure_firewall: + description: + - The azureFirewall associated with this VirtualHub. + type: dict + suboptions: + id: + description: + - Resource ID. + type: str + security_partner_provider: + description: + - The securityPartnerProvider associated with this VirtualHub. + type: dict + suboptions: + id: + description: + - Resource ID. + type: str + address_prefix: + description: + - Address-prefix for this VirtualHub. + type: str + route_table: + description: + - The routeTable associated with this virtual hub. + type: dict + suboptions: + routes: + description: + - List of all routes. + elements: dict + type: list + suboptions: + address_prefixes: + description: + - List of all addressPrefixes. + type: list + elements: str + next_hop_ip_address: + description: + - NextHop ip address. + type: str + security_provider_name: + description: + - The Security Provider name. 
+ type: str + virtual_hub_route_table_v2_s: + description: + - List of all virtual hub route table v2s associated with this VirtualHub. + type: list + elements: dict + suboptions: + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + routes: + description: + - List of all routes. + type: list + elements: dict + suboptions: + destination_type: + description: + - The type of destinations. + type: str + destinations: + description: + - List of all destinations. + type: list + elements: str + next_hop_type: + description: + - The type of next hops. + type: str + next_hops: + description: + - NextHops ip address. + type: list + elements: str + attached_connections: + description: + - List of all connections attached to this route table v2. + elements: str + type: list + sku: + description: + - The sku of this VirtualHub. + type: str + bgp_connections: + description: + - List of references to Bgp Connections. + type: list + elements: dict + suboptions: + id: + description: + - Resource ID. + type: str + ip_configurations: + description: + - List of references to IpConfigurations. + type: list + elements: dict + suboptions: + id: + description: + - Resource ID. + type: str + virtual_router_asn: + description: + - VirtualRouter ASN. + type: int + virtual_router_ips: + description: + - VirtualRouter IPs. + type: list + elements: str + enable_virtual_router_route_propogation: + description: + - Flag to control route propogation for VirtualRouter hub. + type: bool + state: + description: + - Assert the state of the VirtualHub. + - Use C(present) to create or update an VirtualHub and C(absent) to delete it. 
+ default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + - Haiyuan Zhang (@haiyuazhang) + +''' + +EXAMPLES = ''' + - name: Create a VirtualHub + azure_rm_virtualhub: + resource_group: myResourceGroup + name: my_virtual_hub_name + address_prefix: 10.2.0.0/24 + sku: Standard + location: eastus + enable_virtual_router_route_propogation: false + virtual_wan: + id: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualWans/fredwan + + - name: Delete VirtualHub + azure_rm_virtualhub: + resource_group: myResourceGroup + name: my_virtual_hub_name + location: eastus + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the virtual hub. + type: complex + returned: always + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualHubs/my_virtual_hub_name + name: + description: + - Resource name. + returned: always + type: str + sample: my_virtual_hub_name + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/virtualHubs + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'key1': 'value1' } + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: cf8c0b06-d339-4155-95fd-2a363945cce4 + virtual_wan: + description: + - The VirtualWAN to which the VirtualHub belongs. + returned: always + type: complex + contains: + id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualWans/fredwan + vpn_gateway: + description: + - The VpnGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + p2_s_vpn_gateway: + description: + - The P2SVpnGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + express_route_gateway: + description: + - The expressRouteGateway associated with this VirtualHub. + returned: always + type: dict + sample: null + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + azure_firewall: + description: + - The azureFirewall associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + security_partner_provider: + description: + - The securityPartnerProvider associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + address_prefix: + description: + - Address-prefix for this VirtualHub. + returned: always + type: str + sample: 10.2.0.0/24 + route_table: + description: + - The routeTable associated with this virtual hub. + returned: always + type: complex + contains: + routes: + description: + - List of all routes. + returned: always + type: list + contains: + address_prefixes: + description: + - List of all addressPrefixes. + returned: always + type: list + sample: null + next_hop_ip_address: + description: + - NextHop ip address. + returned: always + type: str + sample: null + provisioning_state: + description: + - The provisioning state of the virtual hub resource. 
+ returned: always + type: str + sample: Succeeded + security_provider_name: + description: + - The Security Provider name. + returned: always + type: str + sample: null + virtual_hub_route_table_v2_s: + description: + - List of all virtual hub route table v2s associated with this VirtualHub. + returned: always + type: complex + contains: + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + returned: always + type: str + sample: null + routes: + description: + - List of all routes. + returned: always + type: list + contains: + destination_type: + description: + - The type of destinations. + returned: always + type: str + sample: null + destinations: + description: + - List of all destinations. + returned: always + type: list + sample: null + next_hop_type: + description: + - The type of next hops. + returned: always + type: str + sample: null + next_hops: + description: + - NextHops ip address. + returned: always + type: list + sample: null + attached_connections: + description: + - List of all connections attached to this route table v2. + returned: always + type: list + sample: null + sku: + description: + - The sku of this VirtualHub. + returned: always + type: str + sample: null + routing_state: + description: + - The routing state. + returned: always + type: str + sample: Standard + bgp_connections: + description: + - List of references to Bgp Connections. + returned: always + type: list + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + ip_configurations: + description: + - List of references to IpConfigurations. + returned: always + type: list + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + virtual_router_asn: + description: + - VirtualRouter ASN. + returned: always + type: int + sample: null + virtual_router_ips: + description: + - VirtualRouter IPs. 
+ returned: always + type: list + sample: null + enable_virtual_router_route_propogation: + description: + - Flag to control route propogation for VirtualRouter hub. + returned: always + type: bool + sample: null + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMVirtualHub(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + location=dict( + type='str', + ), + name=dict( + type='str', + required=True + ), + virtual_wan=dict( + type='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + vpn_gateway=dict( + type='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + p2_s_vpn_gateway=dict( + type='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + express_route_gateway=dict( + type='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + azure_firewall=dict( + type='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + security_partner_provider=dict( + type='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + address_prefix=dict( + type='str', + ), + route_table=dict( + type='dict', + options=dict( + routes=dict( + type='list', + elements='dict', + options=dict( + address_prefixes=dict( + type='list', + elements='str' + ), + next_hop_ip_address=dict( + type='str', + ) + ) + ) + ) + ), + security_provider_name=dict( + type='str', + ), + virtual_hub_route_table_v2_s=dict( + type='list', + elements='dict', + options=dict( + name=dict( + type='str', + ), + routes=dict( + type='list', + elements='dict', + options=dict( + destination_type=dict( + 
type='str', + ), + destinations=dict( + type='list', + elements='str' + ), + next_hop_type=dict( + type='str', + ), + next_hops=dict( + type='list', + elements='str' + ) + ) + ), + attached_connections=dict( + type='list', + elements='str' + ) + ) + ), + sku=dict( + type='str', + ), + bgp_connections=dict( + type='list', + elements='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + ip_configurations=dict( + type='list', + elements='dict', + options=dict( + id=dict( + type='str', + ) + ) + ), + virtual_router_asn=dict( + type='int', + ), + virtual_router_ips=dict( + type='list', + elements='str' + ), + enable_virtual_router_route_propogation=dict( + type='bool', + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.body = {} + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMVirtualHub, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + resource_group = self.get_resource_group(self.resource_group) + if self.location is None: + # Set default location + self.location = resource_group.location + self.body['location'] = self.location + + old_response = None + response = None + + old_response = self.get_resource() + + if not old_response: + if self.state == 'present': + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not 
self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource() + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_resource() + else: + self.results['changed'] = False + response = old_response + + if response is not None: + self.results['state'] = response + + return self.results + + def create_update_resource(self): + try: + response = self.network_client.virtual_hubs.begin_create_or_update(resource_group_name=self.resource_group, + virtual_hub_name=self.name, + virtual_hub_parameters=self.body) + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the VirtualHub instance.') + self.fail('Error creating the VirtualHub instance: {0}'.format(str(exc))) + return response.as_dict() + + def delete_resource(self): + try: + response = self.network_client.virtual_hubs.begin_delete(resource_group_name=self.resource_group, + virtual_hub_name=self.name) + except Exception as e: + self.log('Error attempting to delete the VirtualHub instance.') + self.fail('Error deleting the VirtualHub instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + try: + response = self.network_client.virtual_hubs.get(resource_group_name=self.resource_group, + virtual_hub_name=self.name) + except ResourceNotFoundError as e: + return False + return response.as_dict() + + +def main(): + AzureRMVirtualHub() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub_info.py new file 
mode 100644 index 000000000..b7c1b8fae --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhub_info.py @@ -0,0 +1,620 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 XiuxiSun, (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualhub_info +version_added: '1.10.0' +short_description: Get VirtualHub info +description: + - Get info of VirtualHub. +options: + resource_group: + description: + - The resource group name of the VirtualHub. + type: str + name: + description: + - The name of the VirtualHub. + type: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Fred-Sun (@Fred-Sun) + - Haiyuan Zhang (@haiyuazhang) + +''' + +EXAMPLES = ''' + - name: Get virtual hub info by name + azure_rm_virtualhub_info: + resource_group: myResourceGroup + name: virtualHub + + - name: Get virtual hub info by resource group + azure_rm_virtualhub_info: + resource_group: myResourceGroup + + - name: Get birtual hub info by sub + azure_rm_virtualhub_info: + +''' + +RETURN = ''' +virtual_hubs: + description: + - A list of dict results where the key is the name of the VirtualHub and the values are the facts for that VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualHubs/fredhub + name: + description: + - Resource name. + returned: always + type: str + sample: fredhub + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/virtualHubs + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. 
+ returned: always + type: dict + sample: {"key1":"value1"} + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 4462b9db-6569-49be-8d00-000178b29e90 + virtual_wan: + description: + - The VirtualWAN to which the VirtualHub belongs. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/virtualWans/virtualwanfred01 + vpn_gateway: + description: + - The VpnGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + p2_s_vpn_gateway: + description: + - The P2SVpnGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + express_route_gateway: + description: + - The expressRouteGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + azure_firewall: + description: + - The azureFirewall associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + security_partner_provider: + description: + - The securityPartnerProvider associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + address_prefix: + description: + - Address-prefix for this VirtualHub. + returned: always + type: str + sample: 10.0.0.0/24 + route_table: + description: + - The routeTable associated with this virtual hub. + returned: always + type: complex + contains: + routes: + description: + - List of all routes. 
+ returned: always + type: list + contains: + address_prefixes: + description: + - List of all addressPrefixes. + returned: always + type: list + sample: null + next_hop_ip_address: + description: + - NextHop ip address. + returned: always + type: str + sample: null + provisioning_state: + description: + - The provisioning state of the virtual hub resource. + returned: always + type: str + sample: Succeeded + security_provider_name: + description: + - The Security Provider name. + returned: always + type: str + sample: null + virtual_hub_route_table_v2_s: + description: + - List of all virtual hub route table v2s associated with this VirtualHub. + returned: always + type: complex + contains: + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + returned: always + type: str + sample: null + routes: + description: + - List of all routes. + returned: always + type: complex + contains: + destination_type: + description: + - The type of destinations. + returned: always + type: str + sample: null + destinations: + description: + - List of all destinations. + returned: always + type: list + sample: null + next_hop_type: + description: + - The type of next hops. + returned: always + type: str + sample: null + next_hops: + description: + - NextHops ip address. + returned: always + type: list + sample: null + attached_connections: + description: + - List of all connections attached to this route table v2. + returned: always + type: list + sample: null + sku: + description: + - The sku of this VirtualHub. + returned: always + type: str + sample: Standard + routing_state: + description: + - The routing state. + returned: always + type: str + sample: null + bgp_connections: + description: + - List of references to Bgp Connections. + returned: always + type: list + contains: + id: + description: + - Resource ID. 
+ returned: always + type: str + sample: null + ip_configurations: + description: + - List of references to IpConfigurations. + returned: always + type: list + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + virtual_router_asn: + description: + - VirtualRouter ASN. + returned: always + type: int + sample: null + virtual_router_ips: + description: + - VirtualRouter IPs. + returned: always + type: list + sample: null + enable_virtual_router_route_propogation: + description: + - Flag to control route propogation for VirtualRouter hub. + returned: always + type: bool + sample: null + value: + description: + - List of VirtualHubs. + returned: always + type: complex + contains: + virtual_wan: + description: + - The VirtualWAN to which the VirtualHub belongs. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + vpn_gateway: + description: + - The VpnGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + p2_s_vpn_gateway: + description: + - The P2SVpnGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + express_route_gateway: + description: + - The expressRouteGateway associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + azure_firewall: + description: + - The azureFirewall associated with this VirtualHub. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + security_partner_provider: + description: + - The securityPartnerProvider associated with this VirtualHub. 
+ returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + address_prefix: + description: + - Address-prefix for this VirtualHub. + returned: always + type: str + sample: null + route_table: + description: + - The routeTable associated with this virtual hub. + returned: always + type: complex + contains: + routes: + description: + - List of all routes. + returned: always + type: list + contains: + address_prefixes: + description: + - List of all addressPrefixes. + returned: always + type: list + sample: null + next_hop_ip_address: + description: + - NextHop ip address. + returned: always + type: str + sample: null + security_provider_name: + description: + - The Security Provider name. + returned: always + type: str + sample: null + virtual_hub_route_table_v2_s: + description: + - List of all virtual hub route table v2s associated with this VirtualHub. + returned: always + type: complex + contains: + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + returned: always + type: str + sample: null + routes: + description: + - List of all routes. + returned: always + type: list + contains: + destination_type: + description: + - The type of destinations. + returned: always + type: str + sample: null + destinations: + description: + - List of all destinations. + returned: always + type: list + sample: null + next_hop_type: + description: + - The type of next hops. + returned: always + type: str + sample: null + next_hops: + description: + - NextHops ip address. + returned: always + type: list + sample: null + attached_connections: + description: + - List of all connections attached to this route table v2. + returned: always + type: list + sample: null + sku: + description: + - The sku of this VirtualHub. 
+ returned: always + type: str + sample: null + bgp_connections: + description: + - List of references to Bgp Connections. + returned: always + type: list + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + ip_configurations: + description: + - List of references to IpConfigurations. + returned: always + type: list + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: null + virtual_router_asn: + description: + - VirtualRouter ASN. + returned: always + type: int + sample: null + virtual_router_ips: + description: + - VirtualRouter IPs. + returned: always + type: list + sample: null + enable_virtual_router_route_propogation: + description: + - Flag to control route propogation for VirtualRouter hub. + returned: always + type: bool + sample: null + next_link: + description: + - URL to get the next set of operation list results if there are any. + returned: always + type: str + sample: null + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMVirtualHubInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ) + ) + + self.resource_group = None + self.name = None + + self.results = dict(changed=False) + self.state = None + self.status_code = [200] + + super(AzureRMVirtualHubInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and self.name is not None): + self.results['virtual_hubs'] = self.format_item(self.get()) + elif (self.resource_group is not None): + self.results['virtual_hubs'] = 
self.format_item(self.list_by_resource_group()) + else: + self.results['virtual_hubs'] = self.format_item(self.list()) + return self.results + + def get(self): + response = None + + try: + response = self.network_client.virtual_hubs.get(resource_group_name=self.resource_group, + virtual_hub_name=self.name) + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list_by_resource_group(self): + response = None + + try: + response = self.network_client.virtual_hubs.list_by_resource_group(resource_group_name=self.resource_group) + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list(self): + response = None + + try: + response = self.network_client.virtual_hubs.list() + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def format_item(self, item): + if item is None: + return None + elif hasattr(item, 'as_dict'): + return [item.as_dict()] + else: + result = [] + items = list(item) + for tmp in items: + result.append(tmp.as_dict()) + return result + + +def main(): + AzureRMVirtualHubInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection.py new file mode 100644 index 000000000..5208f665e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection.py @@ -0,0 +1,503 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3) +# XiuxiSun, (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualhubconnection 
+version_added: '1.14.0' +short_description: Manage Azure VirtualHub instance +description: + - Create, update and delete instance of Azure VirtualHub. +options: + resource_group: + description: + - The resource group name of the VirtualHub. + required: true + type: str + name: + description: + - The name of the VirtualHub connection. + required: true + type: str + vhub_name: + description: + - The VirtualHub name. + type: str + required: True + enable_internet_security: + description: + - Enable internet security. + type: bool + allow_remote_vnet_to_use_hub_vnet_gateways: + description: + - Allow RemoteVnet to use Virtual Hub's gateways. + type: bool + allow_hub_to_remote_vnet_transit: + description: + - VirtualHub to RemoteVnet transit to enabled or not. + type: bool + remote_virtual_network: + description: + - ID of the remote VNet to connect to. + type: dict + suboptions: + id: + description: + - The remote virtual network ID. + type: str + routing_configuration: + description: + - The Routing Configuration indicating the associated and propagated route tables on this connection. + type: dict + suboptions: + propagated_route_tables: + description: + - The list of RouteTables to advertise the routes to. + type: dict + suboptions: + labels: + description: + - The list of labels. + type: list + elements: str + ids: + description: + -The list of resource ids of all the virtual hub RouteTables. + type: list + elements: dict + suboptions: + id: + description: + - The ID of the RouteTables. + type: str + vnet_routes: + description: + - List of routes that control routing from VirtualHub into a virtual network connection. + type: dict + suboptions: + static_routes: + description: + - List of all Static Routes. + type: list + elements: dict + suboptions: + name: + description: + - The name of the StaticRoute that is unique within a VnetRoute. + type: str + address_prefixes: + description: + - List of all address prefixes. 
+ type: list + elements: str + next_hop_ip_address: + description: + - The ip address of the next hop. + type: str + state: + description: + - Assert the state of the VirtualHub connection. + - Use C(present) to create or update an VirtualHub connection and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Fred-Sun (@Fred-Sun) + - Xu Zhang (@xuzhang3) + +''' + +EXAMPLES = ''' +- name: Create virtual hub connection + azure_rm_virtualhubconnection: + resource_group: myRG + vhub_name: testhub + name: Myconnection + enable_internet_security: false + allow_remote_vnet_to_use_hub_vnet_gateways: true + allow_hub_to_remote_vnet_transit: true + remote_virtual_network: + id: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/testvnet + routing_configuration: + propagated_route_tables: + labels: + - labels1 + - labels3 + ids: + - id: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/testhub01/hubRouteTables/testtable + vnet_routes: + static_routes: + - name: route1 + address_prefixes: + - 10.1.0.0/16 + - 10.2.0.0/16 + next_hop_ip_address: 10.0.0.68 + - name: route2 + address_prefixes: + - 10.4.0.0/16 + next_hop_ip_address: 10.0.0.65 + +- name: Delete virtual hub connection + azure_rm_virtualhubconnection: + resource_group: myRG + vhub_name: testhub + name: Myconnection + state: absent + +''' + +RETURN = ''' +state: + description: + - A list of dict results for the virtual hub connection info. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/vhub/hubVirtualNetworkConnections/MyConnection + name: + description: + - Resource name. 
+ returned: always + type: str + sample: MyConnection + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 31102041-49e7-4cac-8573-aac1e1a16793 + remote_virtual_network: + description: + - Name of ID of the remote VNet to connect to. + returned: always + type: complex + contains: + id: + description: + - The ID of the remote VNet to connect to. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/testvnet + routing_configuration: + description: + - The routing configuration information + returned: always + type: complex + contains: + associated_route_table: + description: + - The resource ID of route table associated with this routing configuration. + type: complex + returned: always + contains: + id: + description: + - The ID of the routetable. + type: str + returned: always + sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/testhub/hubRouteTables/rt_name + propagated_route_tables: + description: + - Space-separated list of resource id of propagated route tables. + type: complex + returned: always + contains: + ids: + description: + - The list resource ID of propagated route tables. + type: list + returned: always + sample: [{ id: '/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/testhub/hubRouteTables/rt_name'}] + labels: + description: + - Space-separated list of labels for propagated route tables. + type: list + returned: always + sample: ['labels1', 'labels2'] + vnet_routes: + description: + - The name of the Static Route that is unique within a Vnet Route. + returned: always + type: complex + contains: + static_routes: + description: + - The name of the Static Route. + type: list + returned: always + contains: + address_prefixes: + description: + - Space-separated list of all address prefixes. 
+ type: list + returned: always + sample: ["10.1.0.0/16", "10.2.0.0/16"] + name: + description: + - The name of static router. + type: str + returned: always + sample: route1 + next_hop_ip_address: + description: + - The next hop ip address. + type: str + returned: always + sample: 10.0.0.65 + provisioning_state: + description: + - The provisioning state of the virtual hub connection resource. + returned: always + type: str + sample: Succeeded + allow_hub_to_remote_vnet_transit: + description: + - Enable hub to remote VNet transit. + returned: always + type: bool + sample: true + allow_remote_vnet_to_use_hub_vnet_gateways: + description: + - Allow remote VNet to use hub's VNet gateways. + returned: always + type: bool + sample: true + enable_internet_security: + description: + - Enable internet security and default is enabled. + type: bool + returned: always + sample: true +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +static_routes_spec = dict( + name=dict(type='str'), + address_prefixes=dict(type='list', elements='str'), + next_hop_ip_address=dict(type='str') +) + + +class AzureRMVirtualHubConnection(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + vhub_name=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + enable_internet_security=dict( + type='bool' + ), + allow_remote_vnet_to_use_hub_vnet_gateways=dict( + type='bool' + ), + allow_hub_to_remote_vnet_transit=dict( + type='bool' + ), + remote_virtual_network=dict( + type='dict', + options=dict( + id=dict( + 
type='str', + ) + ) + ), + routing_configuration=dict( + type='dict', + options=dict( + propagated_route_tables=dict( + type='dict', + options=dict( + labels=dict( + type='list', + elements='str' + ), + ids=dict( + type='list', + elements='dict', + options=dict( + id=dict( + type='str', + ) + ) + ) + ) + ), + vnet_routes=dict( + type='dict', + options=dict( + static_routes=dict( + type='list', + elements='dict', + options=static_routes_spec + ) + ) + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.vhub_name = None + self.name = None + self.body = {} + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMVirtualHubConnection, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + old_response = None + response = None + + old_response = self.get_resource() + + if not old_response: + if self.state == 'present': + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + if self.body.get('enable_internet_security') is not None: + if bool(self.body['enable_internet_security']) != bool(old_response['enable_internet_security']): + self.to_do = Actions.Update + else: + self.body['enable_internet_security'] = old_response['enable_internet_security'] + if self.body.get('allow_remote_vnet_to_use_hub_vnet_gateways') is not None: + if bool(self.body['allow_remote_vnet_to_use_hub_vnet_gateways']) != bool(old_response['allow_remote_vnet_to_use_hub_vnet_gateways']): + self.to_do = Actions.Update + else: + self.body['allow_remote_vnet_to_use_hub_vnet_gateways'] = 
old_response['allow_remote_vnet_to_use_hub_vnet_gateways'] + if self.body.get('allow_hub_to_remote_vnet_transit') is not None: + if bool(self.body['allow_hub_to_remote_vnet_transit']) != bool(old_response['allow_hub_to_remote_vnet_transit']): + self.to_do = Actions.Update + else: + self.body['allow_hub_to_remote_vnet_transit'] = old_response['allow_hub_to_remote_vnet_transit'] + + if self.body.get('routing_configuration') is not None: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body['routing_configuration'], old_response['routing_configuration'], '', self.results): + self.to_do = Actions.Update + else: + self.body['routing_configuration'] = old_response['routing_configuration'] + else: + self.body['routing_configuration'] = old_response['routing_configuration'] + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource() + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_resource() + else: + self.results['changed'] = False + response = old_response + + if response is not None: + self.results['state'] = response + + return self.results + + def create_update_resource(self): + try: + response = self.network_client.hub_virtual_network_connections.begin_create_or_update(resource_group_name=self.resource_group, + virtual_hub_name=self.vhub_name, + connection_name=self.name, + hub_virtual_network_connection_parameters=self.body) + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the VirtualHub instance.') + self.fail('Error creating the VirtualHub instance: 
{0}'.format(str(exc))) + return response.as_dict() + + def delete_resource(self): + try: + response = self.network_client.hub_virtual_network_connections.begin_delete(resource_group_name=self.resource_group, + virtual_hub_name=self.vhub_name, + connection_name=self.name) + except Exception as e: + self.log('Error attempting to delete the VirtualHub connection instance.') + self.fail('Error deleting the VirtualHub connection instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + try: + response = self.network_client.hub_virtual_network_connections.get(resource_group_name=self.resource_group, + virtual_hub_name=self.vhub_name, + connection_name=self.name) + except ResourceNotFoundError as e: + return False + return response.as_dict() + + +def main(): + AzureRMVirtualHubConnection() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection_info.py new file mode 100644 index 000000000..d2d01dc25 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualhubconnection_info.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# +# Copyright (c) 2022 xuzhang3 (@xuzhang3) +# XiuxiSun, (@Fred-sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualhubconnection_info +version_added: '1.14.0' +short_description: Get VirtualHub info +description: + - Get info of VirtualHub. +options: + resource_group: + description: + - The resource group name of the VirtualHub. + type: str + required: True + virtual_hub_name: + description: + - The resource name of the VirtualHub. + type: str + required: True + name: + description: + - The name of the VirtualHub connection. 
+ type: str +extends_documentation_fragment: + - azure.azcollection.azure +author: + - Fred-Sun (@Fred-Sun) + - Xu Zhang (@xuzhang3) + +''' + +EXAMPLES = ''' + - name: Get virtual hub connection info by name + azure_rm_virtualhubconnection_info: + resource_group: myResourceGroup + virtual_hub_name: virtualHub + name: vhubname + + - name: Get virtual hub connection info by resource group + azure_rm_virtualhubconnection_info: + resource_group: myResourceGroup + virtual_hub_name: virtualHub +''' + +RETURN = ''' +virtual_hub_connection: + description: + - A list of dict results for the virtual hub connection info. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/vhub/hubVirtualNetworkConnections/MyConnection" + name: + description: + - Resource name. + returned: always + type: str + sample: MyConnection + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 31102041-49e7-4cac-8573-aac1e1a16793 + remote_virtual_network: + description: + - Name of ID of the remote VNet to connect to. + returned: always + type: complex + contains: + id: + description: + - The ID of the remote VNet to connect to. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/testvnet + routing_configuration: + description: + - The routing configuration information + returned: always + type: complex + contains: + associated_route_table: + description: + - The resource ID of route table associated with this routing configuration. + type: complex + returned: always + contains: + id: + description: + - The ID of the routetable. 
+ type: str + returned: always + sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/testhub/hubRouteTables/rt_name + propagated_route_tables: + description: + - Space-separated list of resource id of propagated route tables. + type: complex + returned: always + contains: + ids: + description: + - The list resource ID of propagated route tables. + type: list + returned: always + sample: [{id: '/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/virtualHubs/testhub/hubRouteTables/rt_name'}] + labels: + description: + - Space-separated list of labels for propagated route tables. + type: list + returned: always + sample: ['labels1', 'labels2'] + vnet_routes: + description: + - The name of the Static Route that is unique within a Vnet Route. + returned: always + type: complex + contains: + static_routes: + description: + - The name of the Static Route. + type: list + returned: always + contains: + address_prefixes: + description: + - Space-separated list of all address prefixes. + type: list + returned: always + sample: ["10.1.0.0/16", "10.2.0.0/16"] + name: + description: + - The name of static router. + type: str + returned: always + sample: route1 + next_hop_ip_address: + description: + - The next hop ip address. + type: str + returned: always + sample: 10.0.0.65 + provisioning_state: + description: + - The provisioning state of the virtual hub connection resource. + returned: always + type: str + sample: Succeeded + allow_hub_to_remote_vnet_transit: + description: + - Enable hub to remote VNet transit. + returned: always + type: bool + sample: true + allow_remote_vnet_to_use_hub_vnet_gateways: + description: + - Allow remote VNet to use hub's VNet gateways. + returned: always + type: bool + sample: true + enable_internet_security: + description: + - Enable internet security and default is enabled. 
+ type: bool + returned: always + sample: true +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMVirtualHubConnectionInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + virtual_hub_name=dict( + type='str', + required=True + ) + ) + + self.resource_group = None + self.name = None + self.virtual_hub_name = None + + self.results = dict(changed=False) + self.state = None + self.status_code = [200] + + super(AzureRMVirtualHubConnectionInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['virtual_hub_connection'] = self.format_item(self.get()) + else: + self.results['virtual_hub_connection'] = self.format_item(self.list()) + return self.results + + def get(self): + response = None + + try: + response = self.network_client.hub_virtual_network_connections.get(resource_group_name=self.resource_group, + virtual_hub_name=self.virtual_hub_name, + connection_name=self.name) + except ResourceNotFoundError: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list(self): + response = None + + try: + response = self.network_client.hub_virtual_network_connections.list(resource_group_name=self.resource_group, + virtual_hub_name=self.virtual_hub_name) + except Exception: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def format_item(self, item): + if item is None: + return None + elif hasattr(item, 'as_dict'): + return [item.as_dict()] + else: + result = [] + items = 
list(item) + for tmp in items: + result.append(tmp.as_dict()) + return result + + +def main(): + AzureRMVirtualHubConnectionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py new file mode 100644 index 000000000..9fe82d75a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py @@ -0,0 +1,2544 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# Copyright (c) 2018 James E. King, III (@jeking3) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachine + +version_added: "0.1.2" + +short_description: Manage Azure virtual machines + +description: + - Manage and configure virtual machines (VMs) and associated resources on Azure. + - Requires a resource group containing at least one virtual network with at least one subnet. + - Supports images from the Azure Marketplace, which can be discovered with M(azure.azcollection.azure_rm_virtualmachineimage_info). + - Supports custom images since Ansible 2.5. + - To use I(custom_data) on a Linux image, the image must have cloud-init enabled. If cloud-init is not enabled, I(custom_data) is ignored. + +options: + resource_group: + description: + - Name of the resource group containing the VM. + required: true + name: + description: + - Name of the VM. + required: true + custom_data: + description: + - Data made available to the VM and used by C(cloud-init). + - Only used on Linux images with C(cloud-init) enabled. + - Consult U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview) for cloud-init ready images. 
+ - To enable cloud-init on a Linux image, follow U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image). + state: + description: + - State of the VM. + - Set to C(present) to create a VM with the configuration specified by other options, or to update the configuration of an existing VM. + - Set to C(absent) to remove a VM. + - Does not affect power state. Use I(started)/I(allocated)/I(restarted) parameters to change the power state of a VM. + default: present + choices: + - absent + - present + started: + description: + - Whether the VM is started or stopped. + - Set to (true) with I(state=present) to start the VM. + - Set to C(false) to stop the VM. + type: bool + allocated: + description: + - Whether the VM is allocated or deallocated, only useful with I(state=present). + default: True + type: bool + generalized: + description: + - Whether the VM is generalized or not. + - Set to C(true) with I(state=present) to generalize the VM. + - Generalizing a VM is irreversible. + type: bool + default: False + restarted: + description: + - Set to C(true) with I(state=present) to restart a running VM. + default: False + type: bool + location: + description: + - Valid Azure location for the VM. Defaults to location of the resource group. + short_hostname: + description: + - Name assigned internally to the host. On a Linux VM this is the name returned by the C(hostname) command. + - When creating a VM, short_hostname defaults to I(name). + vm_size: + description: + - A valid Azure VM size value. For example, C(Standard_D4). + - Choices vary depending on the subscription and location. Check your subscription for available choices. + - Required when creating a VM. + priority: + description: + - Priority of the VM. + - C(None) is the equivalent of Regular VM. + choices: + - None + - Spot + eviction_policy: + description: + - Specifies the eviction policy for the Azure Spot virtual machine. + - Requires priority to be set to Spot. 
+ choices: + - Deallocate + - Delete + max_price: + description: + - Specifies the maximum price you are willing to pay for a Azure Spot VM/VMSS. + - This price is in US Dollars. + - C(-1) indicates default price to be up-to on-demand. + - Requires priority to be set to Spot. + default: -1 + admin_username: + description: + - Admin username used to access the VM after it is created. + - Required when creating a VM. + admin_password: + description: + - Password for the admin username. + - Not required if the I(os_type=Linux) and SSH password authentication is disabled by setting I(ssh_password_enabled=false). + ssh_password_enabled: + description: + - Whether to enable or disable SSH passwords. + - When I(os_type=Linux), set to C(false) to disable SSH password authentication and require use of SSH keys. + default: true + type: bool + ssh_public_keys: + description: + - For I(os_type=Linux) provide a list of SSH keys. + - Accepts a list of dicts where each dictionary contains two keys, I(path) and I(key_data). + - Set I(path) to the default location of the authorized_keys files. For example, I(path=/home//.ssh/authorized_keys). + - Set I(key_data) to the actual value of the public key. + image: + description: + - The image used to build the VM. + - For custom images, the name of the image. To narrow the search to a specific resource group, a dict with the keys I(name) and I(resource_group). + - For Marketplace images, a dict with the keys I(publisher), I(offer), I(sku), and I(version). + - Set I(version=latest) to get the most recent version of a given image. + required: true + availability_set: + description: + - Name or ID of an existing availability set to add the VM to. The I(availability_set) should be in the same resource group as VM. + proximity_placement_group: + description: + - The name or ID of the proximity placement group the VM should be associated with. 
+ type: dict + suboptions: + id: + description: + - The ID of the proximity placement group the VM should be associated with. + type: str + name: + description: + - The Name of the proximity placement group the VM should be associated with. + type: str + resource_group: + description: + - The resource group of the proximity placement group the VM should be associated with. + type: str + storage_account_name: + description: + - Name of a storage account that supports creation of VHD blobs. + - If not specified for a new VM, a new storage account named 01 will be created using storage type C(Standard_LRS). + aliases: + - storage_account + storage_container_name: + description: + - Name of the container to use within the storage account to store VHD blobs. + - If not specified, a default container will be created. + default: vhds + aliases: + - storage_container + storage_blob_name: + description: + - Name of the storage blob used to hold the OS disk image of the VM. + - Must end with '.vhd'. + - If not specified, defaults to the VM name + '.vhd'. + aliases: + - storage_blob + managed_disk_type: + description: + - Managed OS disk type. + - Create OS disk with managed disk if defined. + - If not defined, the OS disk will be created with virtual hard disk (VHD). + choices: + - Standard_LRS + - StandardSSD_LRS + - StandardSSD_ZRS + - Premium_LRS + - Premium_ZRS + - UltraSSD_LRS + os_disk_name: + description: + - OS disk name. + os_disk_caching: + description: + - Type of OS disk caching. + choices: + - ReadOnly + - ReadWrite + aliases: + - disk_caching + os_disk_size_gb: + description: + - Size of OS disk in GB. + os_type: + description: + - Base type of operating system. + choices: + - Windows + - Linux + default: Linux + ephemeral_os_disk: + description: + - Parameters of ephemeral disk settings that can be specified for operating system disk. + - Ephemeral OS disk is only supported for VMS Instances using Managed Disk. 
+ type: bool + data_disks: + description: + - Describes list of data disks. + - Use M(azure.azcollection.azure_rm_mangeddisk) to manage the specific disk. + suboptions: + lun: + description: + - The logical unit number for data disk. + - This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + required: true + disk_size_gb: + description: + - The initial disk size in GB for blank data disks. + - This value cannot be larger than C(1023) GB. + - Size can be changed only when the virtual machine is deallocated. + - Not sure when I(managed_disk_id) defined. + managed_disk_type: + description: + - Managed data disk type. + - Only used when OS disk created with managed disk. + choices: + - Standard_LRS + - StandardSSD_LRS + - StandardSSD_ZRS + - Premium_LRS + - Premium_ZRS + - UltraSSD_LRS + storage_account_name: + description: + - Name of an existing storage account that supports creation of VHD blobs. + - If not specified for a new VM, a new storage account started with I(name) will be created using storage type C(Standard_LRS). + - Only used when OS disk created with virtual hard disk (VHD). + - Used when I(managed_disk_type) not defined. + - Cannot be updated unless I(lun) updated. + storage_container_name: + description: + - Name of the container to use within the storage account to store VHD blobs. + - If no name is specified a default container named 'vhds' will created. + - Only used when OS disk created with virtual hard disk (VHD). + - Used when I(managed_disk_type) not defined. + - Cannot be updated unless I(lun) updated. + default: vhds + storage_blob_name: + description: + - Name of the storage blob used to hold the OS disk image of the VM. + - Must end with '.vhd'. + - Default to the I(name) + timestamp + I(lun) + '.vhd'. + - Only used when OS disk created with virtual hard disk (VHD). + - Used when I(managed_disk_type) not defined. + - Cannot be updated unless I(lun) updated. 
+ caching: + description: + - Type of data disk caching. + choices: + - ReadOnly + - ReadWrite + default: ReadOnly + public_ip_allocation_method: + description: + - Allocation method for the public IP of the VM. + - Used only if a network interface is not specified. + - When set to C(Dynamic), the public IP address may change any time the VM is rebooted or power cycled. + - The C(Disabled) choice was added in Ansible 2.6. + choices: + - Dynamic + - Static + - Disabled + default: Static + aliases: + - public_ip_allocation + open_ports: + description: + - List of ports to open in the security group for the VM, when a security group and network interface are created with a VM. + - For Linux hosts, defaults to allowing inbound TCP connections to port 22. + - For Windows hosts, defaults to opening ports 3389 and 5986. + network_interface_names: + description: + - Network interface names to add to the VM. + - Can be a string of name or resource ID of the network interface. + - Can be a dict containing I(resource_group) and I(name) of the network interface. + - If a network interface name is not provided when the VM is created, a default network interface will be created. + - To create a new network interface, at least one Virtual Network with one Subnet must exist. + type: list + aliases: + - network_interfaces + virtual_network_resource_group: + description: + - The resource group to use when creating a VM with another resource group's virtual network. + virtual_network_name: + description: + - The virtual network to use when creating a VM. + - If not specified, a new network interface will be created and assigned to the first virtual network found in the resource group. + - Use with I(virtual_network_resource_group) to place the virtual network in another resource group. + aliases: + - virtual_network + subnet_name: + description: + - Subnet for the VM. 
+ - Defaults to the first subnet found in the virtual network or the subnet of the I(network_interface_name), if provided. + - If the subnet is in another resource group, specify the resource group with I(virtual_network_resource_group). + aliases: + - subnet + created_nsg: + description: + - Whether network security group created and attached to network interface or not. + type: bool + default: True + version_added: '1.15.0' + remove_on_absent: + description: + - Associated resources to remove when removing a VM using I(state=absent). + - To remove all resources related to the VM being removed, including auto-created resources, set to C(all). + - To remove only resources that were automatically created while provisioning the VM being removed, set to C(all_autocreated). + - To remove only specific resources, set to C(network_interfaces), C(virtual_storage) or C(public_ips). + - Any other input will be ignored. + type: list + default: ['all'] + plan: + description: + - Third-party billing plan for the VM. + type: dict + suboptions: + name: + description: + - Billing plan name. + required: true + product: + description: + - Product name. + required: true + publisher: + description: + - Publisher offering the plan. + required: true + promotion_code: + description: + - Optional promotion code. + accept_terms: + description: + - Accept terms for Marketplace images that require it. + - Only Azure service admin/account admin users can purchase images from the Marketplace. + - Only valid when a I(plan) is specified. + type: bool + default: false + zones: + description: + - A list of Availability Zones for your VM. + type: list + license_type: + description: + - On-premise license for the image or disk. + - Only used for images that contain the Windows Server operating system. + - To remove all license type settings, set to the string C(None). + choices: + - Windows_Server + - Windows_Client + - RHEL_BYOS + - SLES_BYOS + vm_identity: + description: + - Identity for the VM. 
+ choices: + - SystemAssigned + winrm: + description: + - List of Windows Remote Management configurations of the VM. + suboptions: + protocol: + description: + - The protocol of the winrm listener. + required: true + choices: + - http + - https + source_vault: + description: + - The relative URL of the Key Vault containing the certificate. + certificate_url: + description: + - The URL of a certificate that has been uploaded to Key Vault as a secret. + certificate_store: + description: + - The certificate store on the VM to which the certificate should be added. + - The specified certificate store is implicitly in the LocalMachine account. + boot_diagnostics: + description: + - Manage boot diagnostics settings for a VM. + - Boot diagnostics includes a serial console and remote console screenshots. + suboptions: + enabled: + description: + - Flag indicating if boot diagnostics are enabled. + required: true + type: bool + storage_account: + description: + - The name of an existing storage account to use for boot diagnostics. + - If not specified, uses I(storage_account_name) defined one level up. + - If storage account is not specified anywhere, and C(enabled) is C(true), a default storage account is created for boot diagnostics data. + required: false + resource_group: + description: + - Resource group where the storage account is located. + type: str + linux_config: + description: + - Specifies the Linux operating system settings on the virtual machine. + suboptions: + disable_password_authentication: + description: + - Specifies whether password authentication should be disabled. + type: bool + windows_config: + description: + - Specifies Windows operating system settings on the virtual machine. + suboptions: + provision_vm_agent: + description: + - Indicates whether virtual machine agent should be provisioned on the virtual machine. 
+ type: bool + required: True + enable_automatic_updates: + description: + - Indicates whether Automatic Updates is enabled for the Windows virtual machine. + type: bool + required: True + security_profile: + description: + - Specifies the Security related profile settings for the virtual machine. + type: dict + suboptions: + encryption_at_host: + description: + - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine. + - This will enable the encryption for all the disks including Resource/Temp disk at host itself. + type: bool + security_type: + description: + - Specifies the SecurityType of the virtual machine. + - It is set as TrustedLaunch to enable UefiSettings. + type: str + choices: + - TrustedLaunch + uefi_settings: + description: + - Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + type: dict + suboptions: + secure_boot_enabled: + description: + - Specifies whether secure boot should be enabled on the virtual machine. + type: bool + v_tpm_enabled: + description: + - Specifies whether vTPM should be enabled on the virtual machine. + type: bool + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + - Christopher Perrin (@cperrin88) + - James E. 
King III (@jeking3) +''' +EXAMPLES = ''' + +- name: Create VM with defaults + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm10 + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +- name: Create an availability set for managed disk vm + azure_rm_availabilityset: + name: avs-managed-disk + resource_group: myResourceGroup + platform_update_domain_count: 5 + platform_fault_domain_count: 2 + sku: Aligned + +- name: Create a VM with managed disk + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: vm-managed-disk + admin_username: "{{ username }}" + availability_set: avs-managed-disk + managed_disk_type: Standard_LRS + image: + offer: 0001-com-ubuntu-server-focal + publisher: canonical + sku: 20_04-lts-gen2 + version: latest + vm_size: Standard_D4 + +- name: Create a VM with existing storage account and NIC + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm002 + vm_size: Standard_D4 + storage_account: testaccount001 + admin_username: "{{ username }}" + ssh_public_keys: + - path: /home/adminUser/.ssh/authorized_keys + key_data: < insert your ssh public key here... > + network_interfaces: testvm001 + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +- name: Create a VM with OS and multiple data managed disks + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm001 + vm_size: Standard_D4 + managed_disk_type: Standard_LRS + admin_username: "{{ username }}" + ssh_public_keys: + - path: /home/adminUser/.ssh/authorized_keys + key_data: < insert your ssh public key here... 
> + image: + offer: 0001-com-ubuntu-server-focal + publisher: canonical + sku: 20_04-lts-gen2 + version: latest + data_disks: + - lun: 0 + managed_disk_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/myDisk" + - lun: 1 + disk_size_gb: 128 + managed_disk_type: Premium_LRS + +- name: Create a VM with OS and multiple data storage accounts + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm001 + vm_size: Standard_DS1_v2 + admin_username: "{{ username }}" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminUser/.ssh/authorized_keys + key_data: < insert your ssh public key here... > + network_interfaces: testvm001 + storage_container: osdisk + storage_blob: osdisk.vhd + boot_diagnostics: + enabled: yes + image: + offer: 0001-com-ubuntu-server-focal + publisher: canonical + sku: 20_04-lts-gen2 + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + storage_container_name: datadisk1 + storage_blob_name: datadisk1.vhd + - lun: 1 + disk_size_gb: 128 + storage_container_name: datadisk2 + storage_blob_name: datadisk2.vhd + +- name: Create a VM with a custom image + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm001 + vm_size: Standard_DS1_v2 + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: customimage001 + +- name: Create a VM with a custom image from a particular resource group + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm001 + vm_size: Standard_DS1_v2 + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: + name: customimage001 + resource_group: myResourceGroup + +- name: Create a VM with an image id + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm001 + vm_size: Standard_DS1_v2 + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: + id: '{{image_id}}' + +- name: Create a VM with spcified OS 
disk size + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: big-os-disk + admin_username: "{{ username }}" + admin_password: "{{ password }}" + os_disk_size_gb: 512 + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +- name: Create a VM with OS and Plan, accepting the terms + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: f5-nva + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: + publisher: f5-networks + offer: f5-big-ip-best + sku: f5-bigip-virtual-edition-200m-best-hourly + version: latest + plan: + name: f5-bigip-virtual-edition-200m-best-hourly + product: f5-big-ip-best + publisher: f5-networks + +- name: Create a VM with Spot Instance + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm10 + vm_size: Standard_D4 + priority: Spot + eviction_policy: Deallocate + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +- name: Power Off + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm002 + started: no + +- name: Deallocate + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm002 + allocated: no + +- name: Power On + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm002 + +- name: Restart + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm002 + restarted: yes + +- name: Create a VM with an Availability Zone + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm001 + vm_size: Standard_DS1_v2 + admin_username: "{{ username }}" + admin_password: "{{ password }}" + image: customimage001 + zones: [1] + +- name: Create a VM with security profile + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_D4s_v3 + managed_disk_type: Standard_LRS + admin_username: "{{ username }}" + 
admin_password: "{{ password }}" + security_profile: + uefi_settings: + secure_boot_enabled: True + v_tpm_enabled: True + encryption_at_host: True + security_type: TrustedLaunch + ssh_public_keys: + - path: /home/azureuser/.ssh/authorized_keys + key_data: "ssh-rsa *****" + image: + offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts-gen2 + version: latest + +- name: Remove a VM and all resources that were autocreated + azure_rm_virtualmachine: + resource_group: myResourceGroup + name: testvm002 + remove_on_absent: all_autocreated + state: absent +''' + +RETURN = ''' +powerstate: + description: + - Indicates if the state is C(running), C(stopped), C(deallocated), C(generalized). + returned: always + type: str + sample: running +deleted_vhd_uris: + description: + - List of deleted Virtual Hard Disk URIs. + returned: 'on delete' + type: list + sample: ["https://testvm104519.blob.core.windows.net/vhds/testvm10.vhd"] +deleted_network_interfaces: + description: + - List of deleted NICs. + returned: 'on delete' + type: list + sample: ["testvm1001"] +deleted_public_ips: + description: + - List of deleted public IP address names. + returned: 'on delete' + type: list + sample: ["testvm1001"] +azure_vm: + description: + - Facts about the current state of the object. Note that facts are not part of the registered output but available directly. 
+ returned: always + type: dict + sample: { + "properties": { + "availabilitySet": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/availabilitySets/MYAVAILABILITYSET" + }, + "proximityPlacementGroup": { + "id": "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/proximityPlacementGroups/testid13" + }, + "hardwareProfile": { + "vmSize": "Standard_D1" + }, + "instanceView": { + "disks": [ + { + "name": "testvm10.vhd", + "statuses": [ + { + "code": "ProvisioningState/succeeded", + "displayStatus": "Provisioning succeeded", + "level": "Info", + "time": "2016-03-30T07:11:16.187272Z" + } + ] + } + ], + "statuses": [ + { + "code": "ProvisioningState/succeeded", + "displayStatus": "Provisioning succeeded", + "level": "Info", + "time": "2016-03-30T20:33:38.946916Z" + }, + { + "code": "PowerState/running", + "displayStatus": "VM running", + "level": "Info" + } + ], + "vmAgent": { + "extensionHandlers": [], + "statuses": [ + { + "code": "ProvisioningState/succeeded", + "displayStatus": "Ready", + "level": "Info", + "message": "GuestAgent is running and accepting new configurations.", + "time": "2016-03-30T20:31:16.000Z" + } + ], + "vmAgentVersion": "WALinuxAgent-2.0.16" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01", + "name": "testvm10_NIC01", + "properties": { + "dnsSettings": { + "appliedDnsServers": [], + "dnsServers": [] + }, + "enableIPForwarding": false, + "ipConfigurations": [ + { + "etag": 'W/"041c8c2a-d5dd-4cd7-8465-9125cfbe2cf8"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default", + "name": "default", + "properties": { + "privateIPAddress": "10.10.0.5", + "privateIPAllocationMethod": 
"Dynamic", + "provisioningState": "Succeeded", + "publicIPAddress": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/testvm10_PIP01", + "name": "testvm10_PIP01", + "properties": { + "idleTimeoutInMinutes": 4, + "ipAddress": "13.92.246.197", + "ipConfiguration": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default" + }, + "provisioningState": "Succeeded", + "publicIPAllocationMethod": "Static", + "resourceGuid": "3447d987-ca0d-4eca-818b-5dddc0625b42" + } + } + } + } + ], + "macAddress": "00-0D-3A-12-AA-14", + "primary": true, + "provisioningState": "Succeeded", + "resourceGuid": "10979e12-ccf9-42ee-9f6d-ff2cc63b3844", + "virtualMachine": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testvm10" + } + } + } + ] + }, + "osProfile": { + "adminUsername": "chouseknecht", + "computerName": "test10", + "linuxConfiguration": { + "disablePasswordAuthentication": false + }, + "secrets": [] + }, + "provisioningState": "Succeeded", + "storageProfile": { + "dataDisks": [ + { + "caching": "ReadWrite", + "createOption": "empty", + "diskSizeGB": 64, + "lun": 0, + "name": "datadisk1.vhd", + "vhd": { + "uri": "https://testvm10sa1.blob.core.windows.net/datadisk/datadisk1.vhd" + } + } + ], + "imageReference": { + "offer": "CentOS", + "publisher": "OpenLogic", + "sku": "7.1", + "version": "7.1.20160308" + }, + "osDisk": { + "caching": "ReadOnly", + "createOption": "fromImage", + "name": "testvm10.vhd", + "osType": "Linux", + "vhd": { + "uri": "https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd" + } + } + } + }, + "type": "Microsoft.Compute/virtualMachines" + } +''' # NOQA + +import base64 +import random +import re +import time + +try: + from azure.core.exceptions import 
ResourceNotFoundError + from azure.core.polling import LROPoller + from msrestazure.azure_exceptions import CloudError + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.tools import parse_resource_id +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible.module_utils.basic import to_native, to_bytes +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import (AzureRMModuleBase, + azure_id_to_dict, + normalize_location_name, + format_resource_id + ) + + +AZURE_OBJECT_CLASS = 'VirtualMachine' + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] + + +def extract_names_from_blob_uri(blob_uri, storage_suffix): + # HACK: ditch this once python SDK supports get by URI + m = re.match(r'^https://(?P[^.]+)\.blob\.{0}/' + r'(?P[^/]+)/(?P.+)$'.format(storage_suffix), blob_uri) + if not m: + raise Exception("unable to parse blob uri '%s'" % blob_uri) + extracted_names = m.groupdict() + return extracted_names + + +proximity_placement_group_spec = dict( + id=dict(type='str'), + name=dict(type='str'), + resource_group=dict(type='str') +) + + +windows_configuration_spec = dict( + enable_automatic_updates=dict(type='bool', required=True), + provision_vm_agent=dict(type='bool', required=True), +) + + +linux_configuration_spec = dict( + disable_password_authentication=dict(type='bool') +) + + +class AzureRMVirtualMachine(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + custom_data=dict(type='str'), + state=dict(choices=['present', 'absent'], default='present', type='str'), + location=dict(type='str'), + short_hostname=dict(type='str'), + vm_size=dict(type='str'), + priority=dict(type='str', choices=['None', 'Spot']), + eviction_policy=dict(type='str', choices=['Deallocate', 'Delete']), + max_price=dict(type='float', default=-1), + admin_username=dict(type='str'), + 
admin_password=dict(type='str', no_log=True), + ssh_password_enabled=dict(type='bool', default=True, no_log=False), + ssh_public_keys=dict(type='list'), + image=dict(type='raw'), + availability_set=dict(type='str'), + storage_account_name=dict(type='str', aliases=['storage_account']), + storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'), + storage_blob_name=dict(type='str', aliases=['storage_blob']), + os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite']), + os_disk_size_gb=dict(type='int'), + managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS', 'StandardSSD_ZRS', 'Premium_LRS', 'Premium_ZRS', 'UltraSSD_LRS']), + os_disk_name=dict(type='str'), + proximity_placement_group=dict(type='dict', options=proximity_placement_group_spec), + os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'), + public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static', 'Disabled'], default='Static', + aliases=['public_ip_allocation']), + open_ports=dict(type='list'), + network_interface_names=dict(type='list', aliases=['network_interfaces'], elements='raw'), + remove_on_absent=dict(type='list', default=['all']), + virtual_network_resource_group=dict(type='str'), + virtual_network_name=dict(type='str', aliases=['virtual_network']), + subnet_name=dict(type='str', aliases=['subnet']), + created_nsg=dict(type='bool', default=True), + allocated=dict(type='bool', default=True), + restarted=dict(type='bool', default=False), + started=dict(type='bool'), + generalized=dict(type='bool', default=False), + data_disks=dict(type='list'), + plan=dict(type='dict'), + zones=dict(type='list'), + accept_terms=dict(type='bool', default=False), + license_type=dict(type='str', choices=['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS']), + vm_identity=dict(type='str', choices=['SystemAssigned']), + winrm=dict(type='list'), + boot_diagnostics=dict(type='dict'), + 
ephemeral_os_disk=dict(type='bool'), + windows_config=dict(type='dict', options=windows_configuration_spec), + linux_config=dict(type='dict', options=linux_configuration_spec), + security_profile=dict(type='dict'), + ) + + self.resource_group = None + self.name = None + self.custom_data = None + self.state = None + self.location = None + self.short_hostname = None + self.vm_size = None + self.priority = None + self.eviction_policy = None + self.admin_username = None + self.admin_password = None + self.ssh_password_enabled = None + self.ssh_public_keys = None + self.image = None + self.availability_set = None + self.storage_account_name = None + self.storage_container_name = None + self.storage_blob_name = None + self.os_type = None + self.os_disk_caching = None + self.os_disk_size_gb = None + self.managed_disk_type = None + self.os_disk_name = None + self.proximity_placement_group = None + self.network_interface_names = None + self.remove_on_absent = set() + self.tags = None + self.force = None + self.public_ip_allocation_method = None + self.open_ports = None + self.virtual_network_resource_group = None + self.virtual_network_name = None + self.subnet_name = None + self.created_nsg = None + self.allocated = None + self.restarted = None + self.started = None + self.generalized = None + self.differences = None + self.data_disks = None + self.plan = None + self.accept_terms = None + self.zones = None + self.license_type = None + self.vm_identity = None + self.boot_diagnostics = None + self.ephemeral_os_disk = None + self.linux_config = None + self.windows_config = None + self.security_profile = None + + self.results = dict( + changed=False, + actions=[], + powerstate_change=None, + ansible_facts=dict(azure_vm=None) + ) + + super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True) + + @property + def boot_diagnostics_present(self): + return self.boot_diagnostics is not None and 'enabled' in self.boot_diagnostics + + 
def get_boot_diagnostics_storage_account(self, limited=False, vm_dict=None): + """ + Get the boot diagnostics storage account. + + Arguments: + - limited - if true, limit the logic to the boot_diagnostics storage account + this is used if initial creation of the VM has a stanza with + boot_diagnostics disabled, so we only create a storage account + if the user specifies a storage account name inside the boot_diagnostics + schema + - vm_dict - if invoked on an update, this is the current state of the vm including + tags, like the default storage group tag '_own_sa_'. + + Normal behavior: + - try the self.boot_diagnostics.storage_account field + - if not there, try the self.storage_account_name field + - if not there, use the default storage account + + If limited is True: + - try the self.boot_diagnostics.storage_account field + - if not there, None + """ + bsa = None + if 'storage_account' in self.boot_diagnostics: + if 'resource_group' in self.boot_diagnostics: + bsa = self.get_storage_account(self.boot_diagnostics['resource_group'], self.boot_diagnostics['storage_account']) + else: + bsa = self.get_storage_account(self.resource_group, self.boot_diagnostics['storage_account']) + elif limited: + return None + elif self.storage_account_name: + bsa = self.get_storage_account(self.resource_group, self.storage_account_name) + else: + bsa = self.create_default_storage_account(vm_dict=vm_dict) + self.log("boot diagnostics storage account:") + self.log(self.serialize_obj(bsa, 'StorageAccount'), pretty_print=True) + return bsa + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + # make sure options are lower case + self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent]) + + # convert elements to ints + self.zones = [int(i) for i in self.zones] if self.zones else None + + changed = False + powerstate_change = None + results = dict() + vm = None + 
network_interfaces = [] + requested_storage_uri = None + requested_vhd_uri = None + data_disk_requested_vhd_uri = None + disable_ssh_password = None + vm_dict = None + image_reference = None + custom_image = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + self.location = normalize_location_name(self.location) + + if self.state == 'present': + # Verify parameters and resolve any defaults + + if self.vm_size and not self.vm_size_is_valid(): + self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format( + self.vm_size + )) + + if self.network_interface_names: + for nic_name in self.network_interface_names: + nic = self.parse_network_interface(nic_name) + network_interfaces.append(nic) + + if self.ssh_public_keys: + msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \ + "each dict contains keys: path, key_data." 
+ for key in self.ssh_public_keys: + if not isinstance(key, dict): + self.fail(msg) + if not key.get('path') or not key.get('key_data'): + self.fail(msg) + + if self.image and isinstance(self.image, dict): + if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')): + marketplace_image = self.get_marketplace_image_version() + + if self.image['version'] == 'latest': + self.image['version'] = marketplace_image.name + self.log("Using image version {0}".format(self.image['version'])) + + image_reference = self.compute_models.ImageReference( + publisher=self.image['publisher'], + offer=self.image['offer'], + sku=self.image['sku'], + version=self.image['version'] + ) + elif self.image.get('name'): + custom_image = True + image_reference = self.get_custom_image_reference( + self.image.get('name'), + self.image.get('resource_group')) + elif self.image.get('id'): + try: + image_reference = self.compute_models.ImageReference(id=self.image['id']) + except Exception as exc: + self.fail("id Error: Cannot get image from the reference id - {0}".format(self.image['id'])) + else: + self.fail("parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]") + elif self.image and isinstance(self.image, str): + custom_image = True + image_reference = self.get_custom_image_reference(self.image) + elif self.image: + self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__)) + + if self.plan: + if not self.plan.get('name') or not self.plan.get('product') or not self.plan.get('publisher'): + self.fail("parameter error: plan must include name, product, and publisher") + + if not self.storage_blob_name and not self.managed_disk_type: + self.storage_blob_name = self.name + '.vhd' + elif self.managed_disk_type: + self.storage_blob_name = self.name + + if self.storage_account_name and not self.managed_disk_type: + properties = self.get_storage_account(self.resource_group, 
self.storage_account_name) + + requested_storage_uri = properties.primary_endpoints.blob + requested_vhd_uri = '{0}{1}/{2}'.format(requested_storage_uri, + self.storage_container_name, + self.storage_blob_name) + + disable_ssh_password = not self.ssh_password_enabled + + try: + self.log("Fetching virtual machine {0}".format(self.name)) + vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview') + retry_count = 0 + while True: + if retry_count == 10: + self.fail("Error {0} has a provisioning state of Updating. Expecting state to be Successed.".format(self.name)) + + if vm.provisioning_state == 'Updating': + retry_count = retry_count + 1 + time.sleep(300) + vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview') + else: + break + + vm_dict = self.serialize_vm(vm) + + if self.state == 'present': + differences = [] + current_nics = [] + results = vm_dict + current_osdisk = vm_dict['properties']['storageProfile']['osDisk'] + current_ephemeral = current_osdisk.get('diffDiskSettings', None) + current_properties = vm_dict['properties'] + + if self.priority and self.priority != current_properties.get('priority', 'None'): + self.fail('VM Priority is not updatable: requested virtual machine priority is {0}'.format(self.priority)) + if self.eviction_policy and \ + self.eviction_policy != current_properties.get('evictionPolicy', None): + self.fail('VM Eviction Policy is not updatable: requested virtual machine eviction policy is {0}'.format(self.eviction_policy)) + if self.max_price and \ + vm_dict['properties'].get('billingProfile', None) and \ + self.max_price != vm_dict['properties']['billingProfile'].get('maxPrice', None): + self.fail('VM Maximum Price is not updatable: requested virtual machine maximum price is {0}'.format(self.max_price)) + + if self.ephemeral_os_disk and current_ephemeral is None: + self.fail('Ephemeral OS disk not updatable: virtual machine ephemeral OS disk is 
{0}'.format(self.ephemeral_os_disk)) + elif not self.ephemeral_os_disk and current_ephemeral is not None: + self.fail('Ephemeral OS disk not updatable: virtual machine ephemeral OS disk is {0}'.format(self.ephemeral_os_disk)) + + # Try to determine if the VM needs to be updated + if self.network_interface_names: + for nic in vm_dict['properties']['networkProfile']['networkInterfaces']: + current_nics.append(nic['id']) + + if set(current_nics) != set(network_interfaces): + self.log('CHANGED: virtual machine {0} - network interfaces are different.'.format(self.name)) + differences.append('Network Interfaces') + updated_nics = [dict(id=id, primary=(i == 0)) + for i, id in enumerate(network_interfaces)] + vm_dict['properties']['networkProfile']['networkInterfaces'] = updated_nics + changed = True + + if self.os_disk_caching and \ + self.os_disk_caching != vm_dict['properties']['storageProfile']['osDisk']['caching']: + self.log('CHANGED: virtual machine {0} - OS disk caching'.format(self.name)) + differences.append('OS Disk caching') + changed = True + vm_dict['properties']['storageProfile']['osDisk']['caching'] = self.os_disk_caching + + if self.os_disk_name and \ + self.os_disk_name != vm_dict['properties']['storageProfile']['osDisk']['name']: + self.log('CHANGED: virtual machine {0} - OS disk name'.format(self.name)) + differences.append('OS Disk name') + changed = True + vm_dict['properties']['storageProfile']['osDisk']['name'] = self.os_disk_name + + if self.os_disk_size_gb and \ + self.os_disk_size_gb != vm_dict['properties']['storageProfile']['osDisk'].get('diskSizeGB'): + self.log('CHANGED: virtual machine {0} - OS disk size '.format(self.name)) + differences.append('OS Disk size') + changed = True + vm_dict['properties']['storageProfile']['osDisk']['diskSizeGB'] = self.os_disk_size_gb + + if self.vm_size and \ + self.vm_size != vm_dict['properties']['hardwareProfile']['vmSize']: + self.log('CHANGED: virtual machine {0} - size '.format(self.name)) + 
differences.append('VM size') + changed = True + vm_dict['properties']['hardwareProfile']['vmSize'] = self.vm_size + + update_tags, vm_dict['tags'] = self.update_tags(vm_dict.get('tags', dict())) + if update_tags: + differences.append('Tags') + changed = True + + if self.short_hostname and self.short_hostname != vm_dict['properties']['osProfile']['computerName']: + self.log('CHANGED: virtual machine {0} - short hostname'.format(self.name)) + differences.append('Short Hostname') + changed = True + vm_dict['properties']['osProfile']['computerName'] = self.short_hostname + + if self.started and vm_dict['powerstate'] not in ['starting', 'running'] and self.allocated: + self.log("CHANGED: virtual machine {0} not running and requested state 'running'".format(self.name)) + changed = True + powerstate_change = 'poweron' + elif self.state == 'present' and vm_dict['powerstate'] == 'running' and self.restarted: + self.log("CHANGED: virtual machine {0} {1} and requested state 'restarted'" + .format(self.name, vm_dict['powerstate'])) + changed = True + powerstate_change = 'restarted' + elif self.state == 'present' and not self.allocated and vm_dict['powerstate'] not in ['deallocated', 'deallocating']: + self.log("CHANGED: virtual machine {0} {1} and requested state 'deallocated'" + .format(self.name, vm_dict['powerstate'])) + changed = True + powerstate_change = 'deallocated' + elif self.started is not None and not self.started and vm_dict['powerstate'] == 'running': + self.log("CHANGED: virtual machine {0} running and requested state 'stopped'".format(self.name)) + changed = True + powerstate_change = 'poweroff' + elif self.generalized and vm_dict['powerstate'] != 'generalized': + self.log("CHANGED: virtual machine {0} requested to be 'generalized'".format(self.name)) + changed = True + powerstate_change = 'generalized' + + vm_dict['zones'] = [int(i) for i in vm_dict['zones']] if 'zones' in vm_dict and vm_dict['zones'] else None + if self.zones is not None and self.zones != 
vm_dict['zones']:
+                        self.log("CHANGED: virtual machine {0} zones".format(self.name))
+                        differences.append('Zones')
+                        changed = True
+
+                    if self.license_type is not None and vm_dict['properties'].get('licenseType') != self.license_type:
+                        differences.append('License Type')
+                        changed = True
+
+                    if self.security_profile is not None:
+                        update_security_profile = False
+                        if 'securityProfile' not in vm_dict['properties'].keys():
+                            update_security_profile = True
+                            differences.append('security_profile')
+                        else:
+                            if self.security_profile.get('encryption_at_host') is not None:
+                                if bool(self.security_profile.get('encryption_at_host')) != bool(vm_dict['properties']['securityProfile']['encryptionAtHost']):
+                                    update_security_profile = True
+                            else:
+                                self.security_profile['encryption_at_host'] = vm_dict['properties']['securityProfile']['encryptionAtHost']
+                            if self.security_profile.get('security_type') is not None:
+                                if self.security_profile.get('security_type') != vm_dict['properties']['securityProfile']['securityType']:
+                                    update_security_profile = True
+                            if self.security_profile.get('uefi_settings') is not None:
+                                if self.security_profile['uefi_settings'].get('secure_boot_enabled') is not None:
+                                    if bool(self.security_profile['uefi_settings']['secure_boot_enabled']) != \
+                                            bool(vm_dict['properties']['securityProfile']['uefiSettings']['secureBootEnabled']):
+                                        update_security_profile = True
+                                else:
+                                    self.security_profile['uefi_settings']['secure_boot_enabled'] = \
+                                        vm_dict['properties']['securityProfile']['uefiSettings']['secureBootEnabled']
+                                if self.security_profile['uefi_settings'].get('v_tpm_enabled') is not None:
+                                    if bool(self.security_profile['uefi_settings']['v_tpm_enabled']) != \
+                                            bool(vm_dict['properties']['securityProfile']['uefiSettings']['vTpmEnabled']):
+                                        update_security_profile = True
+                                else:
+                                    self.security_profile['uefi_settings']['v_tpm_enabled'] = \
+                                        vm_dict['properties']['securityProfile']['uefiSettings']['vTpmEnabled']
+                        if update_security_profile:
+                            
changed = True + differences.append('security_profile') + + if self.windows_config is not None and vm_dict['properties']['osProfile'].get('windowsConfiguration') is not None: + if self.windows_config['enable_automatic_updates'] != vm_dict['properties']['osProfile']['windowsConfiguration']['enableAutomaticUpdates']: + self.fail("(PropertyChangeNotAllowed) Changing property 'windowsConfiguration.enableAutomaticUpdates' is not allowed.") + + if self.windows_config['provision_vm_agent'] != vm_dict['properties']['osProfile']['windowsConfiguration']['provisionVMAgent']: + self.fail("(PropertyChangeNotAllowed) Changing property 'windowsConfiguration.provisionVMAgent' is not allowed.") + + if self.linux_config is not None and vm_dict['properties']['osProfile'].get('linuxConfiguration') is not None: + if self.linux_config['disable_password_authentication'] != \ + vm_dict['properties']['osProfile']['linuxConfiguration']['disablePasswordAuthentication']: + self.fail("(PropertyChangeNotAllowed) Changing property 'linuxConfiguration.disablePasswordAuthentication' is not allowed.") + + # Defaults for boot diagnostics + if 'diagnosticsProfile' not in vm_dict['properties']: + vm_dict['properties']['diagnosticsProfile'] = {} + if 'bootDiagnostics' not in vm_dict['properties']['diagnosticsProfile']: + vm_dict['properties']['diagnosticsProfile']['bootDiagnostics'] = { + 'enabled': False, + 'storageUri': None + } + if self.boot_diagnostics_present: + current_boot_diagnostics = vm_dict['properties']['diagnosticsProfile']['bootDiagnostics'] + boot_diagnostics_changed = False + + if self.boot_diagnostics['enabled'] != current_boot_diagnostics['enabled']: + current_boot_diagnostics['enabled'] = self.boot_diagnostics['enabled'] + boot_diagnostics_changed = True + + boot_diagnostics_storage_account = self.get_boot_diagnostics_storage_account( + limited=not self.boot_diagnostics['enabled'], vm_dict=vm_dict) + boot_diagnostics_blob = boot_diagnostics_storage_account.primary_endpoints.blob if 
boot_diagnostics_storage_account else None + if current_boot_diagnostics.get('storageUri') != boot_diagnostics_blob: + current_boot_diagnostics['storageUri'] = boot_diagnostics_blob + boot_diagnostics_changed = True + + if boot_diagnostics_changed: + differences.append('Boot Diagnostics') + changed = True + + # Adding boot diagnostics can create a default storage account after initial creation + # this means we might also need to update the _own_sa_ tag + own_sa = (self.tags or {}).get('_own_sa_', None) + cur_sa = vm_dict.get('tags', {}).get('_own_sa_', None) + if own_sa and own_sa != cur_sa: + if 'Tags' not in differences: + differences.append('Tags') + if 'tags' not in vm_dict: + vm_dict['tags'] = {} + vm_dict['tags']['_own_sa_'] = own_sa + changed = True + + self.differences = differences + + elif self.state == 'absent': + self.log("CHANGED: virtual machine {0} exists and requested state is 'absent'".format(self.name)) + results = dict() + changed = True + + except ResourceNotFoundError: + self.log('Virtual machine {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: virtual machine {0} does not exist but state is 'present'.".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['ansible_facts']['azure_vm'] = results + self.results['powerstate_change'] = powerstate_change + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not vm: + # Create the VM + self.log("Create virtual machine {0}".format(self.name)) + self.results['actions'].append('Created VM {0}'.format(self.name)) + + # Validate parameters + if not self.admin_username: + self.fail("Parameter error: admin_username required when creating a virtual machine.") + + if self.os_type == 'Linux': + if disable_ssh_password and not self.ssh_public_keys: + self.fail("Parameter error: ssh_public_keys required when disabling SSH password.") + + if not image_reference: + self.fail("Parameter error: an 
image is required when creating a virtual machine.") + + availability_set_resource = None + if self.availability_set: + parsed_availability_set = parse_resource_id(self.availability_set) + availability_set = self.get_availability_set(parsed_availability_set.get('resource_group', self.resource_group), + parsed_availability_set.get('name')) + availability_set_resource = self.compute_models.SubResource(id=availability_set.id) + + if self.zones: + self.fail("Parameter error: you can't use Availability Set and Availability Zones at the same time") + + proximity_placement_group_resource = None + if self.proximity_placement_group is not None: + if self.proximity_placement_group.get('id') is not None: + proximity_placement_group_resource = self.compute_models.SubResource(id=self.proximity_placement_group['id']) + elif self.proximity_placement_group.get('name') is not None and self.proximity_placement_group.get('resource_group') is not None: + proximity_placement_group = self.get_proximity_placement_group(self.proximity_placement_group.get('resource_group'), + self.proximity_placement_group.get('name')) + proximity_placement_group_resource = self.compute_models.SubResource(id=proximity_placement_group.id) + else: + self.fail("Parameter error: Please recheck your proximity placement group ") + + # Get defaults + if not self.network_interface_names: + default_nic = self.create_default_nic() + self.log("network interface:") + self.log(self.serialize_obj(default_nic, 'NetworkInterface'), pretty_print=True) + network_interfaces = [default_nic.id] + + # os disk + if not self.storage_account_name and not self.managed_disk_type: + storage_account = self.create_default_storage_account() + self.log("os disk storage account:") + self.log(self.serialize_obj(storage_account, 'StorageAccount'), pretty_print=True) + requested_storage_uri = 'https://{0}.blob.{1}/'.format( + storage_account.name, + self._cloud_environment.suffixes.storage_endpoint) + requested_vhd_uri = '{0}{1}/{2}'.format( 
+ requested_storage_uri, + self.storage_container_name, + self.storage_blob_name) + # disk caching + if not self.os_disk_caching: + self.os_disk_caching = 'ReadOnly' + + if not self.short_hostname: + self.short_hostname = self.name + + nics = [self.compute_models.NetworkInterfaceReference(id=id, primary=(i == 0)) + for i, id in enumerate(network_interfaces)] + + # os disk + if self.managed_disk_type: + vhd = None + managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=self.managed_disk_type) + elif custom_image: + vhd = None + managed_disk = None + else: + vhd = self.compute_models.VirtualHardDisk(uri=requested_vhd_uri) + managed_disk = None + + plan = None + if self.plan: + plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'), + publisher=self.plan.get('publisher'), + promotion_code=self.plan.get('promotion_code')) + + # do this before creating vm_resource as it can modify tags + if self.boot_diagnostics_present and self.boot_diagnostics['enabled']: + boot_diag_storage_account = self.get_boot_diagnostics_storage_account() + + vm_resource = self.compute_models.VirtualMachine( + location=self.location, + tags=self.tags, + os_profile=self.compute_models.OSProfile( + admin_username=self.admin_username, + computer_name=self.short_hostname, + ), + hardware_profile=self.compute_models.HardwareProfile( + vm_size=self.vm_size + ), + storage_profile=self.compute_models.StorageProfile( + os_disk=self.compute_models.OSDisk( + name=self.os_disk_name if self.os_disk_name else self.storage_blob_name, + vhd=vhd, + managed_disk=managed_disk, + create_option=self.compute_models.DiskCreateOptionTypes.from_image, + caching=self.os_disk_caching, + disk_size_gb=self.os_disk_size_gb, + diff_disk_settings=self.compute_models.DiffDiskSettings(option='Local') if self.ephemeral_os_disk else None + ), + image_reference=image_reference, + ), + network_profile=self.compute_models.NetworkProfile( + network_interfaces=nics + ), + 
availability_set=availability_set_resource, + proximity_placement_group=proximity_placement_group_resource, + plan=plan, + zones=self.zones, + ) + + if self.priority == 'Spot': + vm_resource.priority = self.priority + vm_resource.eviction_policy = self.eviction_policy + vm_resource.billing_profile = self.compute_models.BillingProfile( + max_price=self.max_price + ) + + if self.license_type is not None: + vm_resource.license_type = self.license_type + + if self.vm_identity: + vm_resource.identity = self.compute_models.VirtualMachineIdentity(type=self.vm_identity) + + if self.winrm: + winrm_listeners = list() + for winrm_listener in self.winrm: + winrm_listeners.append(self.compute_models.WinRMListener( + protocol=winrm_listener.get('protocol'), + certificate_url=winrm_listener.get('certificate_url') + )) + if winrm_listener.get('source_vault'): + if not vm_resource.os_profile.secrets: + vm_resource.os_profile.secrets = list() + + vm_resource.os_profile.secrets.append(self.compute_models.VaultSecretGroup( + source_vault=self.compute_models.SubResource( + id=winrm_listener.get('source_vault') + ), + vault_certificates=[ + self.compute_models.VaultCertificate( + certificate_url=winrm_listener.get('certificate_url'), + certificate_store=winrm_listener.get('certificate_store') + ), + ] + )) + + self.winrm = self.compute_models.WinRMConfiguration( + listeners=winrm_listeners + ) + + if self.os_type == 'Windows': + vm_resource.os_profile.windows_configuration = self.compute_models.WindowsConfiguration( + win_rm=self.winrm, + provision_vm_agent=self.windows_config['provision_vm_agent'] if self.windows_config is not None else True, + enable_automatic_updates=self.windows_config['enable_automatic_updates'] if self.windows_config is not None else True, + ) + + if self.boot_diagnostics_present: + if self.boot_diagnostics['enabled']: + storage_uri = boot_diag_storage_account.primary_endpoints.blob + else: + storage_uri = None + vm_resource.diagnostics_profile = 
self.compute_models.DiagnosticsProfile( + boot_diagnostics=self.compute_models.BootDiagnostics( + enabled=self.boot_diagnostics['enabled'], + storage_uri=storage_uri)) + + if self.admin_password: + vm_resource.os_profile.admin_password = self.admin_password + + if self.custom_data: + # Azure SDK (erroneously?) wants native string type for this + vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data))) + + if self.os_type == 'Linux': + vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration( + disable_password_authentication=self.linux_config['disable_password_authentication'] if self.linux_config else disable_ssh_password + ) + if self.ssh_public_keys: + ssh_config = self.compute_models.SshConfiguration() + ssh_config.public_keys = \ + [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys] + vm_resource.os_profile.linux_configuration.ssh = ssh_config + + # data disk + if self.data_disks: + data_disks = [] + count = 0 + + for data_disk in self.data_disks: + if not data_disk.get('managed_disk_type'): + if not data_disk.get('storage_blob_name'): + data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd' + count += 1 + + if data_disk.get('storage_account_name'): + data_disk_storage_account = self.get_storage_account(self.resource_group, data_disk['storage_account_name']) + else: + data_disk_storage_account = self.create_default_storage_account() + self.log("data disk storage account:") + self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True) + + if not data_disk.get('storage_container_name'): + data_disk['storage_container_name'] = 'vhds' + + data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format( + data_disk_storage_account.name, + self._cloud_environment.suffixes.storage_endpoint, + data_disk['storage_container_name'], + data_disk['storage_blob_name'] + ) + + if not 
data_disk.get('managed_disk_type'): + data_disk_managed_disk = None + disk_name = data_disk['storage_blob_name'] + data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri) + else: + data_disk_vhd = None + data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type']) + disk_name = self.name + "-datadisk-" + str(count) + count += 1 + + data_disk['caching'] = data_disk.get( + 'caching', 'ReadOnly' + ) + + data_disks.append(self.compute_models.DataDisk( + lun=data_disk['lun'], + name=disk_name, + vhd=data_disk_vhd, + caching=data_disk['caching'], + create_option=self.compute_models.DiskCreateOptionTypes.empty, + disk_size_gb=data_disk['disk_size_gb'], + managed_disk=data_disk_managed_disk, + )) + + vm_resource.storage_profile.data_disks = data_disks + + # Before creating VM accept terms of plan if `accept_terms` is True + if self.accept_terms is True: + if not self.plan or not all([self.plan.get('name'), self.plan.get('product'), self.plan.get('publisher')]): + self.fail("parameter error: plan must be specified and include name, product, and publisher") + try: + plan_name = self.plan.get('name') + plan_product = self.plan.get('product') + plan_publisher = self.plan.get('publisher') + term = self.marketplace_client.marketplace_agreements.get( + offer_type='virtualmachine', publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name) + term.accepted = True + self.marketplace_client.marketplace_agreements.create( + offer_type='virtualmachine', publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name, parameters=term) + except Exception as exc: + self.fail(("Error accepting terms for virtual machine {0} with plan {1}. " + + "Only service admin/account admin users can purchase images " + + "from the marketplace. 
- {2}").format(self.name, self.plan, str(exc))) + + if self.security_profile is not None: + uefi_settings_spec = None + if self.security_profile.get('uefi_settings') is not None: + uefi_settings_spec = self.compute_models.UefiSettings( + secure_boot_enabled=self.security_profile['uefi_settings'].get('secure_boot_enabled'), + v_tpm_enabled=self.security_profile['uefi_settings'].get('v_tpm_enabled'), + ) + security_profile = self.compute_models.SecurityProfile( + uefi_settings=uefi_settings_spec, + encryption_at_host=self.security_profile.get('encryption_at_host'), + security_type=self.security_profile.get('security_type'), + ) + vm_resource.security_profile = security_profile + + self.log("Create virtual machine with parameters:") + self.create_or_update_vm(vm_resource, 'all_autocreated' in self.remove_on_absent) + + elif self.differences and len(self.differences) > 0: + # Update the VM based on detected config differences + + self.log("Update virtual machine {0}".format(self.name)) + self.results['actions'].append('Updated VM {0}'.format(self.name)) + nics = [self.compute_models.NetworkInterfaceReference(id=interface['id'], primary=(i == 0)) + for i, interface in enumerate(vm_dict['properties']['networkProfile']['networkInterfaces'])] + + # os disk + if not vm_dict['properties']['storageProfile']['osDisk'].get('managedDisk'): + managed_disk = None + vhd = self.compute_models.VirtualHardDisk(uri=vm_dict['properties']['storageProfile']['osDisk'].get('vhd', {}).get('uri')) + else: + vhd = None + managed_disk = self.compute_models.ManagedDiskParameters( + storage_account_type=vm_dict['properties']['storageProfile']['osDisk']['managedDisk'].get('storageAccountType') + ) + + proximity_placement_group_resource = None + try: + proximity_placement_group_resource = self.compute_models.SubResource(id=vm_dict['properties']['proximityPlacementGroup'].get('id')) + except Exception: + # pass if the proximity Placement Group + pass + + availability_set_resource = None + try: + 
availability_set_resource = self.compute_models.SubResource(id=vm_dict['properties']['availabilitySet'].get('id')) + except Exception: + # pass if the availability set is not set + pass + + if 'imageReference' in vm_dict['properties']['storageProfile'].keys(): + if 'id' in vm_dict['properties']['storageProfile']['imageReference'].keys(): + image_reference = self.compute_models.ImageReference( + id=vm_dict['properties']['storageProfile']['imageReference']['id'] + ) + else: + image_reference = self.compute_models.ImageReference( + publisher=vm_dict['properties']['storageProfile']['imageReference'].get('publisher'), + offer=vm_dict['properties']['storageProfile']['imageReference'].get('offer'), + sku=vm_dict['properties']['storageProfile']['imageReference'].get('sku'), + version=vm_dict['properties']['storageProfile']['imageReference'].get('version') + ) + else: + image_reference = None + + # You can't change a vm zone + if self.zones is not None and vm_dict['zones'] != self.zones: + self.fail("You can't change the Availability Zone of a virtual machine (have: {0}, want: {1})".format(vm_dict['zones'], self.zones)) + + if 'osProfile' in vm_dict['properties']: + os_profile = self.compute_models.OSProfile( + admin_username=vm_dict['properties'].get('osProfile', {}).get('adminUsername'), + computer_name=vm_dict['properties'].get('osProfile', {}).get('computerName') + ) + else: + os_profile = None + + vm_resource = self.compute_models.VirtualMachine( + location=vm_dict['location'], + os_profile=os_profile, + hardware_profile=self.compute_models.HardwareProfile( + vm_size=vm_dict['properties']['hardwareProfile'].get('vmSize') + ), + storage_profile=self.compute_models.StorageProfile( + os_disk=self.compute_models.OSDisk( + name=vm_dict['properties']['storageProfile']['osDisk'].get('name'), + vhd=vhd, + managed_disk=managed_disk, + create_option=vm_dict['properties']['storageProfile']['osDisk'].get('createOption'), + 
os_type=vm_dict['properties']['storageProfile']['osDisk'].get('osType'), + caching=vm_dict['properties']['storageProfile']['osDisk'].get('caching'), + disk_size_gb=vm_dict['properties']['storageProfile']['osDisk'].get('diskSizeGB') + ), + image_reference=image_reference + ), + availability_set=availability_set_resource, + proximity_placement_group=proximity_placement_group_resource, + network_profile=self.compute_models.NetworkProfile( + network_interfaces=nics + ) + ) + + if self.license_type is not None: + vm_resource.license_type = self.license_type + + if self.boot_diagnostics is not None: + vm_resource.diagnostics_profile = self.compute_models.DiagnosticsProfile( + boot_diagnostics=self.compute_models.BootDiagnostics( + enabled=vm_dict['properties']['diagnosticsProfile']['bootDiagnostics']['enabled'], + storage_uri=vm_dict['properties']['diagnosticsProfile']['bootDiagnostics']['storageUri'])) + + if vm_dict.get('tags'): + vm_resource.tags = vm_dict['tags'] + + # Add custom_data, if provided + if vm_dict['properties'].get('osProfile', {}).get('customData'): + custom_data = vm_dict['properties']['osProfile']['customData'] + # Azure SDK (erroneously?) 
wants native string type for this + vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(custom_data))) + + # Add admin password, if one provided + if vm_dict['properties'].get('osProfile', {}).get('adminPassword'): + vm_resource.os_profile.admin_password = vm_dict['properties']['osProfile']['adminPassword'] + + # Add Windows configuration, if applicable + windows_config = vm_dict['properties'].get('osProfile', {}).get('windowsConfiguration') + if windows_config: + if self.windows_config is not None: + vm_resource.os_profile.windows_configuration = self.compute_models.WindowsConfiguration( + provision_vm_agent=self.windows_config['provision_vm_agent'], + enable_automatic_updates=self.windows_config['enable_automatic_updates'] + ) + else: + vm_resource.os_profile.windows_configuration = self.compute_models.WindowsConfiguration( + provision_vm_agent=windows_config.get('provisionVMAgent', True), + enable_automatic_updates=windows_config.get('enableAutomaticUpdates', True) + ) + + # Add linux configuration, if applicable + linux_config = vm_dict['properties'].get('osProfile', {}).get('linuxConfiguration') + if linux_config: + if self.linux_config is not None: + vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration( + disable_password_authentication=self.linux_config['disable_password_authentication'] + ) + else: + vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration( + disable_password_authentication=linux_config.get('disablePasswordAuthentication', False) + ) + ssh_config = linux_config.get('ssh', None) + if ssh_config: + public_keys = ssh_config.get('publicKeys') + if public_keys: + vm_resource.os_profile.linux_configuration.ssh = self.compute_models.SshConfiguration(public_keys=[]) + for key in public_keys: + vm_resource.os_profile.linux_configuration.ssh.public_keys.append( + self.compute_models.SshPublicKey(path=key['path'], key_data=key['keyData']) + ) + + # data disk + if 
vm_dict['properties']['storageProfile'].get('dataDisks'): + data_disks = [] + + for data_disk in vm_dict['properties']['storageProfile']['dataDisks']: + if data_disk.get('managedDisk'): + managed_disk_type = data_disk['managedDisk'].get('storageAccountType') + data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=managed_disk_type) + data_disk_vhd = None + else: + data_disk_vhd = data_disk['vhd']['uri'] + data_disk_managed_disk = None + + data_disks.append(self.compute_models.DataDisk( + lun=int(data_disk['lun']), + name=data_disk.get('name'), + vhd=data_disk_vhd, + caching=data_disk.get('caching'), + create_option=data_disk.get('createOption'), + disk_size_gb=int(data_disk.get('diskSizeGB', 0)) or None, + managed_disk=data_disk_managed_disk, + )) + vm_resource.storage_profile.data_disks = data_disks + + if self.security_profile is not None: + uefi_settings_spec = None + if self.security_profile.get('uefi_settings') is not None: + uefi_settings_spec = self.compute_models.UefiSettings( + secure_boot_enabled=self.security_profile['uefi_settings'].get('secure_boot_enabled'), + v_tpm_enabled=self.security_profile['uefi_settings'].get('v_tpm_enabled'), + ) + security_profile = self.compute_models.SecurityProfile( + uefi_settings=uefi_settings_spec, + encryption_at_host=self.security_profile.get('encryption_at_host'), + security_type=self.security_profile.get('security_type'), + ) + vm_resource.security_profile = security_profile + + self.log("Update virtual machine with parameters:") + self.create_or_update_vm(vm_resource, False) + + # Make sure we leave the machine in requested power state + if (powerstate_change == 'poweron' and + self.results['ansible_facts']['azure_vm']['powerstate'] != 'running'): + # Attempt to power on the machine + self.power_on_vm() + + elif (powerstate_change == 'poweroff' and + self.results['ansible_facts']['azure_vm']['powerstate'] == 'running'): + # Attempt to power off the machine + self.power_off_vm() + + 
            elif powerstate_change == 'restarted':
                self.restart_vm()

            elif powerstate_change == 'deallocated':
                self.deallocate_vm()
            elif powerstate_change == 'generalized':
                # Azure requires the VM to be powered off before it can be generalized.
                self.power_off_vm()
                self.generalize_vm()

            self.results['ansible_facts']['azure_vm'] = self.serialize_vm(self.get_vm())

        elif self.state == 'absent':
            # delete the VM
            self.log("Delete virtual machine {0}".format(self.name))
            self.results['ansible_facts']['azure_vm'] = None
            self.delete_vm(vm)

        # until we sort out how we want to do this globally
        del self.results['actions']

        return self.results

    def get_vm(self):
        '''
        Get the VM with expanded instanceView

        :return: VirtualMachine object
        '''
        try:
            vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
            return vm
        except Exception as exc:
            self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc)))

    def serialize_vm(self, vm):
        '''
        Convert a VirtualMachine object to dict.

        Adds id/name/type/location/tags, a derived ``powerstate`` key taken from the
        instance view, and expands each network interface and public IP reference
        into its full serialized properties.

        :param vm: VirtualMachine object
        :return: dict
        '''

        result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
        result['id'] = vm.id
        result['name'] = vm.name
        result['type'] = vm.type
        result['location'] = vm.location
        result['tags'] = vm.tags

        result['powerstate'] = dict()
        if vm.instance_view:
            # First status whose code starts with 'PowerState' gives e.g. 'running'/'deallocated'.
            result['powerstate'] = next((s.code.replace('PowerState/', '')
                                         for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
            # A generalized OS state overrides the raw power state.
            for s in vm.instance_view.statuses:
                if s.code.lower() == "osstate/generalized":
                    result['powerstate'] = 'generalized'

        # Expand network interfaces to include config properties
        for interface in vm.network_profile.network_interfaces:
            int_dict = azure_id_to_dict(interface.id)
            nic = self.get_network_interface(int_dict['resourceGroups'], int_dict['networkInterfaces'])
            for interface_dict in result['properties']['networkProfile']['networkInterfaces']:
                if interface_dict['id'] == interface.id:
                    nic_dict = self.serialize_obj(nic, 'NetworkInterface')
                    interface_dict['name'] = int_dict['networkInterfaces']
                    interface_dict['properties'] = nic_dict['properties']
        # Expand public IPs to include config properties
        for interface in result['properties']['networkProfile']['networkInterfaces']:
            for config in interface['properties']['ipConfigurations']:
                if config['properties'].get('publicIPAddress'):
                    pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])
                    try:
                        pip = self.network_client.public_ip_addresses.get(pipid_dict['resourceGroups'],
                                                                          pipid_dict['publicIPAddresses'])
                    except Exception as exc:
                        self.fail("Error fetching public ip {0} - {1}".format(pipid_dict['publicIPAddresses'],
                                                                              str(exc)))
                    pip_dict = self.serialize_obj(pip, 'PublicIPAddress')
                    config['properties']['publicIPAddress']['name'] = pipid_dict['publicIPAddresses']
                    config['properties']['publicIPAddress']['properties'] = pip_dict['properties']

        self.log(result, pretty_print=True)
        if self.state != 'absent' and not result['powerstate']:
            self.fail("Failed to determine PowerState of virtual machine {0}".format(self.name))
        return result

    def power_off_vm(self):
        # Stop the VM (billing continues while it is merely powered off, as opposed to deallocated).
        self.log("Powered off virtual machine {0}".format(self.name))
        self.results['actions'].append("Powered off virtual machine {0}".format(self.name))
        try:
            poller = self.compute_client.virtual_machines.begin_power_off(self.resource_group, self.name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error powering off virtual machine {0} - {1}".format(self.name, str(exc)))
        return True

    def power_on_vm(self):
        # Start a stopped/deallocated VM and wait for the long-running operation to finish.
        self.results['actions'].append("Powered on virtual machine {0}".format(self.name))
        self.log("Power on virtual machine {0}".format(self.name))
        try:
            poller = self.compute_client.virtual_machines.begin_start(self.resource_group, self.name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error powering on virtual machine {0} - {1}".format(self.name, str(exc)))
        return True

    def restart_vm(self):
        # Restart the VM and wait for the long-running operation to finish.
        self.results['actions'].append("Restarted virtual machine {0}".format(self.name))
        self.log("Restart virtual machine {0}".format(self.name))
        try:
            poller = self.compute_client.virtual_machines.begin_restart(self.resource_group, self.name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error restarting virtual machine {0} - {1}".format(self.name, str(exc)))
        return True

    def deallocate_vm(self):
        # Deallocate releases the compute resources (stops billing for compute).
        self.results['actions'].append("Deallocated virtual machine {0}".format(self.name))
        self.log("Deallocate virtual machine {0}".format(self.name))
        try:
            poller = self.compute_client.virtual_machines.begin_deallocate(self.resource_group, self.name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deallocating virtual machine {0} - {1}".format(self.name, str(exc)))
        return True

    def generalize_vm(self):
        '''Mark the (powered-off) VM as generalized so it can be used as an image source.'''
        self.results['actions'].append("Generalize virtual machine {0}".format(self.name))
        self.log("Generalize virtual machine {0}".format(self.name))
        try:
            response = self.compute_client.virtual_machines.generalize(self.resource_group, self.name)
            # Some SDK versions return a poller here; only wait when they do.
            if isinstance(response, LROPoller):
                self.get_poller_result(response)
        except Exception as exc:
            self.fail("Error generalizing virtual machine {0} - {1}".format(self.name, str(exc)))
        return True

    def remove_autocreated_resources(self, tags):
        '''Delete the storage account / NIC / public IP / NSG this module auto-created,
        as recorded in the VM's ``_own_*_`` tags.'''
        if tags:
            sa_name = tags.get('_own_sa_')
            nic_name = tags.get('_own_nic_')
            pip_name = tags.get('_own_pip_')
            nsg_name = tags.get('_own_nsg_')
            if sa_name:
                self.delete_storage_account(self.resource_group, sa_name)
            if nic_name:
                self.delete_nic(self.resource_group, nic_name)
            if pip_name:
                self.delete_pip(self.resource_group, pip_name)
            if nsg_name:
                self.delete_nsg(self.resource_group, nsg_name)

    def delete_vm(self, vm):
        '''
        Delete the VM, then best-effort delete linked resources selected by
        ``self.remove_on_absent`` (VHDs/managed disks, NICs, public IPs, and/or
        everything the module auto-created).

        :param vm: VirtualMachine object to delete
        '''
        vhd_uris = []
        managed_disk_ids = []
        nic_names = []
        pip_names = []

        if 'all_autocreated' not in self.remove_on_absent:
            if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
                # store the attached vhd info so we can nuke it after the VM is gone
                if (vm.storage_profile.os_disk.managed_disk):
                    self.log('Storing managed disk ID for deletion')
                    managed_disk_ids.append(vm.storage_profile.os_disk.managed_disk.id)
                elif (vm.storage_profile.os_disk.vhd):
                    self.log('Storing VHD URI for deletion')
                    vhd_uris.append(vm.storage_profile.os_disk.vhd.uri)

                data_disks = vm.storage_profile.data_disks
                for data_disk in data_disks:
                    if data_disk is not None:
                        if (data_disk.vhd):
                            vhd_uris.append(data_disk.vhd.uri)
                        elif (data_disk.managed_disk):
                            managed_disk_ids.append(data_disk.managed_disk.id)

                # FUTURE enable diff mode, move these there...
                self.log("VHD URIs to delete: {0}".format(', '.join(vhd_uris)))
                self.results['deleted_vhd_uris'] = vhd_uris
                self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
                self.results['deleted_managed_disk_ids'] = managed_disk_ids

            if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
                # store the attached nic info so we can nuke them after the VM is gone
                self.log('Storing NIC names for deletion.')
                for interface in vm.network_profile.network_interfaces:
                    id_dict = azure_id_to_dict(interface.id)
                    nic_names.append(dict(name=id_dict['networkInterfaces'], resource_group=id_dict['resourceGroups']))
                self.log('NIC names to delete {0}'.format(str(nic_names)))
                self.results['deleted_network_interfaces'] = nic_names
            if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
                # also store each nic's attached public IPs and delete after the NIC is gone
                # NOTE(review): pip collection iterates nic_names, which is only populated
                # when 'network_interfaces' (or 'all') was also selected — TODO confirm intended.
                for nic_dict in nic_names:
                    nic = self.get_network_interface(nic_dict['resource_group'], nic_dict['name'])
                    for ipc in nic.ip_configurations:
                        if ipc.public_ip_address:
                            pip_dict = azure_id_to_dict(ipc.public_ip_address.id)
                            pip_names.append(dict(name=pip_dict['publicIPAddresses'], resource_group=pip_dict['resourceGroups']))
                self.log('Public IPs to delete are {0}'.format(str(pip_names)))
                self.results['deleted_public_ips'] = pip_names

        self.log("Deleting virtual machine {0}".format(self.name))
        self.results['actions'].append("Deleted virtual machine {0}".format(self.name))
        try:
            poller = self.compute_client.virtual_machines.begin_delete(self.resource_group, self.name)
            # wait for the poller to finish
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting virtual machine {0} - {1}".format(self.name, str(exc)))

        # TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
        # TODO: best-effort to keep deleting other linked resources if we encounter an error
        if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
            self.log('Deleting VHDs')
            self.delete_vm_storage(vhd_uris)
            self.log('Deleting managed disks')
            self.delete_managed_disks(managed_disk_ids)

        if 'all' in self.remove_on_absent or 'all_autocreated' in self.remove_on_absent:
            self.remove_autocreated_resources(vm.tags)

        if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
            self.log('Deleting network interfaces')
            for nic_dict in nic_names:
                self.delete_nic(nic_dict['resource_group'], nic_dict['name'])

        if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
            self.log('Deleting public IPs')
            for pip_dict in pip_names:
                self.delete_pip(pip_dict['resource_group'], pip_dict['name'])

        # NOTE(review): remove_autocreated_resources(vm.tags) is invoked twice in this
        # method (also a few lines above). Azure deletes are idempotent so behavior is
        # unchanged, but the second call looks like a merge artifact — confirm upstream.
        if 'all' in self.remove_on_absent or 'all_autocreated' in self.remove_on_absent:
            self.remove_autocreated_resources(vm.tags)

        return True

    def get_network_interface(self, resource_group, name):
        '''Fetch a NetworkInterface by resource group and name; fail the module if missing.'''
        try:
            nic = self.network_client.network_interfaces.get(resource_group, name)
            return nic
        except ResourceNotFoundError as exc:
            self.fail("Error fetching network interface {0} - {1}".format(name, str(exc)))
        return True

    def delete_nic(self, resource_group, name):
        '''Delete a network interface and wait for completion.'''
        self.log("Deleting network interface {0}".format(name))
        self.results['actions'].append("Deleted network interface {0}".format(name))
        try:
            poller = self.network_client.network_interfaces.begin_delete(resource_group, name)
        except Exception as exc:
            self.fail("Error deleting network interface {0} - {1}".format(name, str(exc)))
        # NOTE(review): get_poller_result is outside the try (unlike delete_pip) —
        # a failure while polling is not converted to a module failure message here.
        self.get_poller_result(poller)
        # Delete doesn't return anything.
        # If we get this far, assume success
        return True

    def delete_pip(self, resource_group, name):
        '''Delete a public IP address and wait for completion.'''
        self.results['actions'].append("Deleted public IP {0}".format(name))
        try:
            poller = self.network_client.public_ip_addresses.begin_delete(resource_group, name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting {0} - {1}".format(name, str(exc)))
        # Delete returns nada. If we get here, assume that all is well.
        return True

    def delete_nsg(self, resource_group, name):
        '''Delete a network security group and wait for completion.'''
        self.results['actions'].append("Deleted NSG {0}".format(name))
        try:
            poller = self.network_client.network_security_groups.begin_delete(resource_group, name)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting {0} - {1}".format(name, str(exc)))
        return True

    def delete_managed_disks(self, managed_disk_ids):
        '''Delete each managed disk by resource ID via the generic resources API.'''
        for mdi in managed_disk_ids:
            try:
                # Deleted through the generic resource client with a pinned API version.
                poller = self.rm_client.resources.begin_delete_by_id(mdi, '2017-03-30')
                self.get_poller_result(poller)
            except Exception as exc:
                self.fail("Error deleting managed disk {0} - {1}".format(mdi, str(exc)))
        return True

    def delete_storage_account(self, resource_group, name):
        '''Delete a storage account.

        NOTE(review): the ``resource_group`` parameter is ignored — the call below
        always uses ``self.resource_group``. Callers currently pass the same value,
        but confirm before relying on the parameter.
        '''
        self.log("Delete storage account {0}".format(name))
        self.results['actions'].append("Deleted storage account {0}".format(name))
        try:
            self.storage_client.storage_accounts.delete(self.resource_group, name)
        except Exception as exc:
            self.fail("Error deleting storage account {0} - {1}".format(name, str(exc)))
        return True

    def delete_vm_storage(self, vhd_uris):
        '''Delete each unmanaged VHD blob referenced by the given URIs.'''
        # FUTURE: figure out a cloud_env independent way to delete these
        for uri in vhd_uris:
            self.log("Extracting info from blob uri '{0}'".format(uri))
            try:
                blob_parts = extract_names_from_blob_uri(uri, self._cloud_environment.suffixes.storage_endpoint)
            except Exception as exc:
                self.fail("Error parsing blob URI {0}".format(str(exc)))
            storage_account_name = blob_parts['accountname']
            container_name = blob_parts['containername']
            blob_name = blob_parts['blobname']

            blob_service_client = self.get_blob_service_client(self.resource_group, storage_account_name)

            self.log("Delete blob {0}:{1}".format(container_name, blob_name))
            self.results['actions'].append("Deleted blob {0}:{1}".format(container_name, blob_name))
            try:
                blob_service_client.get_blob_client(container=container_name, blob=blob_name).delete_blob()
            except Exception as exc:
                self.fail("Error deleting blob {0}:{1} - {2}".format(container_name, blob_name, str(exc)))
        return True

    def get_marketplace_image_version(self):
        '''Resolve self.image (publisher/offer/sku/version) against the marketplace;
        'latest' picks the last entry of the name-ordered listing.'''
        try:
            versions = self.compute_client.virtual_machine_images.list(self.location,
                                                                       self.image['publisher'],
                                                                       self.image['offer'],
                                                                       self.image['sku'],
                                                                       orderby='name')
        except Exception as exc:
            self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
                                                                      self.image['offer'],
                                                                      self.image['sku'],
                                                                      str(exc)))
        if versions and len(versions) > 0:
            if self.image['version'] == 'latest':
                return versions[len(versions) - 1]
            for version in versions:
                if version.name == self.image['version']:
                    return version

        self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
                                                                      self.image['offer'],
                                                                      self.image['sku'],
                                                                      self.image['version']))
        return None

    def get_custom_image_reference(self, name, resource_group=None):
        '''Find a custom image by name (scoped to a resource group when given) and
        return an ImageReference pointing at its ID; fail if not found.'''
        try:
            if resource_group:
                vm_images = self.compute_client.images.list_by_resource_group(resource_group)
            else:
                vm_images = self.compute_client.images.list()
        except Exception as exc:
            self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))

        for vm_image in vm_images:
            if vm_image.name == name:
                self.log("Using custom image id {0}".format(vm_image.id))
                return self.compute_models.ImageReference(id=vm_image.id)

        self.fail("Error could not find image with name {0}".format(name))
        return None

    def get_proximity_placement_group(self, resource_group, name):
        '''Fetch a proximity placement group; fail the module if it cannot be retrieved.'''
        try:
            return self.compute_client.proximity_placement_groups.get(resource_group, name)
        except Exception as exc:
            self.fail("Error fetching proximity placement group {0} - {1}".format(name, str(exc)))

    def get_availability_set(self, resource_group, name):
        '''Fetch an availability set; fail the module if it cannot be retrieved.'''
        try:
            return self.compute_client.availability_sets.get(resource_group, name)
        except Exception as exc:
            self.fail("Error fetching availability set {0} - {1}".format(name, str(exc)))

    def get_storage_account(self, resource_group, name):
        '''Fetch a storage account's properties; fail the module if it cannot be retrieved.'''
        try:
            account = self.storage_client.storage_accounts.get_properties(resource_group,
                                                                          name)
            return account
        except Exception as exc:
            self.fail("Error fetching storage account {0} - {1}".format(name, str(exc)))

    def create_or_update_vm(self, params, remove_autocreated_on_failure):
        '''Submit the create/update and wait; on failure optionally tear down
        resources this module auto-created (recorded in params.tags).'''
        try:
            poller = self.compute_client.virtual_machines.begin_create_or_update(self.resource_group, self.name, params)
            self.get_poller_result(poller)
        except Exception as exc:
            if remove_autocreated_on_failure:
                self.remove_autocreated_resources(params.tags)
            self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))

    def vm_size_is_valid(self):
        '''
        Validate self.vm_size against the list of virtual machine sizes available for the account and location.

        :return: boolean
        '''
        try:
            sizes = self.compute_client.virtual_machine_sizes.list(self.location)
        except Exception as exc:
            self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
        for size in sizes:
            if size.name == self.vm_size:
                return True
        return False

    def create_default_storage_account(self, vm_dict=None):
        '''
        Create (once) a default storage account XXXX, where XXXX is a random number.
        NOTE: If XXXX exists, use it instead of failing. Highly unlikely.
        If this method is called multiple times across executions it will return the same
        storage account created with the random name which is stored in a tag on the VM.

        vm_dict is passed in during an update, so we can obtain the _own_sa_ tag and return
        the default storage account we created in a previous invocation

        :return: storage account object
        '''
        account = None
        valid_name = False
        if self.tags is None:
            self.tags = {}

        if self.tags.get('_own_sa_', None):
            # We previously created one in the same invocation
            return self.get_storage_account(self.resource_group, self.tags['_own_sa_'])

        if vm_dict and vm_dict.get('tags', {}).get('_own_sa_', None):
            # We previously created one in a previous invocation
            # We must be updating, like adding boot diagnostics
            return self.get_storage_account(self.resource_group, vm_dict['tags']['_own_sa_'])

        # Attempt to find a valid storage account name
        # (first 20 chars of the VM name, lowered, non-alphanumerics stripped, + 4 random digits)
        storage_account_name_base = re.sub('[^a-zA-Z0-9]', '', self.name[:20].lower())
        for i in range(0, 5):
            rand = random.randrange(1000, 9999)
            storage_account_name = storage_account_name_base + str(rand)
            if self.check_storage_account_name(storage_account_name):
                valid_name = True
                break

        if not valid_name:
            self.fail("Failed to create a unique storage account name for {0}. Try using a different VM name."
                      .format(self.name))

        try:
            account = self.storage_client.storage_accounts.get_properties(self.resource_group, storage_account_name)
        except Exception:
            # Not found: we will create it below.
            pass

        if account:
            self.log("Storage account {0} found.".format(storage_account_name))
            self.check_provisioning_state(account)
            return account
        sku = self.storage_models.Sku(name=self.storage_models.SkuName.standard_lrs)
        sku.tier = self.storage_models.SkuTier.standard
        kind = self.storage_models.Kind.storage
        # pylint: disable=missing-kwoa
        parameters = self.storage_models.StorageAccountCreateParameters(sku=sku, kind=kind, location=self.location)
        self.log("Creating storage account {0} in location {1}".format(storage_account_name, self.location))
        self.results['actions'].append("Created storage account {0}".format(storage_account_name))
        try:
            poller = self.storage_client.storage_accounts.begin_create(self.resource_group, storage_account_name, parameters)
            self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Failed to create storage account: {0} - {1}".format(storage_account_name, str(exc)))
        # Remember the name in a tag so later invocations reuse this account.
        self.tags['_own_sa_'] = storage_account_name
        return self.get_storage_account(self.resource_group, storage_account_name)

    def check_storage_account_name(self, name):
        '''Return True when the storage account name is available; fail on an invalid name.'''
        self.log("Checking storage account name availability for {0}".format(name))
        try:
            account_name = self.storage_models.StorageAccountCheckNameAvailabilityParameters(name=name)
            response = self.storage_client.storage_accounts.check_name_availability(account_name)
            if response.reason == 'AccountNameInvalid':
                raise Exception("Invalid default storage account name: {0}".format(name))
        except Exception as exc:
            self.fail("Error checking storage account name availability for {0} - {1}".format(name, str(exc)))

        # self.fail raises, so 'response' is always bound here.
        return response.name_available

    def create_default_nic(self):
        '''
        Create a default Network Interface <vm name>01. Requires an existing virtual network
        with one subnet. If NIC 01 exists, use it.
        '''
        # Otherwise, create one.
        #
        # :return: NIC object

        network_interface_name = self.name + '01'
        nic = None
        if self.tags is None:
            self.tags = {}

        self.log("Create default NIC {0}".format(network_interface_name))
        self.log("Check to see if NIC {0} exists".format(network_interface_name))
        try:
            nic = self.network_client.network_interfaces.get(self.resource_group, network_interface_name)
        except ResourceNotFoundError:
            # Not found: we will create it below.
            pass

        if nic:
            self.log("NIC {0} found.".format(network_interface_name))
            self.check_provisioning_state(nic)
            return nic

        self.log("NIC {0} does not exist.".format(network_interface_name))

        # Resolve the resource group that holds the virtual network.
        virtual_network_resource_group = None
        if self.virtual_network_resource_group:
            virtual_network_resource_group = self.virtual_network_resource_group
        else:
            virtual_network_resource_group = self.resource_group

        if self.virtual_network_name:
            try:
                self.network_client.virtual_networks.get(virtual_network_resource_group, self.virtual_network_name)
                virtual_network_name = self.virtual_network_name
            except ResourceNotFoundError as exc:
                self.fail("Error: fetching virtual network {0} - {1}".format(self.virtual_network_name, str(exc)))

        else:
            # Find a virtual network (first one listed in the resource group).
            no_vnets_msg = "Error: unable to find virtual network in resource group {0}. A virtual network " \
                           "with at least one subnet must exist in order to create a NIC for the virtual " \
                           "machine.".format(virtual_network_resource_group)

            virtual_network_name = None
            try:
                vnets = self.network_client.virtual_networks.list(virtual_network_resource_group)
            except ResourceNotFoundError:
                self.log('cloud error!')
                self.fail(no_vnets_msg)

            for vnet in vnets:
                virtual_network_name = vnet.name
                self.log('vnet name: {0}'.format(vnet.name))
                break

            if not virtual_network_name:
                self.fail(no_vnets_msg)

        if self.subnet_name:
            try:
                subnet = self.network_client.subnets.get(virtual_network_resource_group, virtual_network_name, self.subnet_name)
                subnet_id = subnet.id
            # NOTE(review): this branch catches CloudError while the vnet lookup above
            # catches ResourceNotFoundError — confirm which exception the SDK raises here.
            except CloudError as exc:
                self.fail("Error: fetching subnet {0} - {1}".format(self.subnet_name, str(exc)))
        else:
            # No subnet given: take the first subnet of the chosen virtual network.
            no_subnets_msg = "Error: unable to find a subnet in virtual network {0}. A virtual network " \
                             "with at least one subnet must exist in order to create a NIC for the virtual " \
                             "machine.".format(virtual_network_name)

            subnet_id = None
            try:
                subnets = self.network_client.subnets.list(virtual_network_resource_group, virtual_network_name)
            except Exception:

                self.fail(no_subnets_msg)

            for subnet in subnets:
                subnet_id = subnet.id
                self.log('subnet id: {0}'.format(subnet_id))
                break

            if not subnet_id:
                self.fail(no_subnets_msg)

        pip = None
        if self.public_ip_allocation_method != 'Disabled':
            # Auto-create a public IP; zoned VMs require the Standard SKU.
            self.results['actions'].append('Created default public IP {0}'.format(self.name + '01'))
            sku = self.network_models.PublicIPAddressSku(name="Standard") if self.zones else None
            pip_facts = self.create_default_pip(self.resource_group, self.location, self.name + '01', self.public_ip_allocation_method, sku=sku)
            pip = self.network_models.PublicIPAddress(id=pip_facts.id, location=pip_facts.location, resource_guid=pip_facts.resource_guid, sku=sku)
            self.tags['_own_pip_'] = self.name + '01'

        # Record the (to-be-created) NSG name so it can be cleaned up on absent.
        self.tags['_own_nsg_'] = self.name + '01'

        parameters = self.network_models.NetworkInterface(
            location=self.location,
            ip_configurations=[
                self.network_models.NetworkInterfaceIPConfiguration(
                    private_ip_allocation_method='Dynamic',
                )
            ]
        )
        parameters.ip_configurations[0].subnet = self.network_models.Subnet(id=subnet_id)
        parameters.ip_configurations[0].name = 'default'

        if self.created_nsg:
            self.results['actions'].append('Created default security group {0}'.format(self.name + '01'))
            group = self.create_default_securitygroup(self.resource_group, self.location, self.name + '01', self.os_type,
                                                      self.open_ports)
            parameters.network_security_group = self.network_models.NetworkSecurityGroup(id=group.id,
                                                                                         location=group.location,
                                                                                         resource_guid=group.resource_guid)

        parameters.ip_configurations[0].public_ip_address = pip

        self.log("Creating NIC {0}".format(network_interface_name))
        self.log(self.serialize_obj(parameters, 'NetworkInterface'), pretty_print=True)
        self.results['actions'].append("Created NIC {0}".format(network_interface_name))
        try:
            poller = self.network_client.network_interfaces.begin_create_or_update(self.resource_group,
                                                                                   network_interface_name,
                                                                                   parameters)
            new_nic = self.get_poller_result(poller)
            # Tag only after successful creation so cleanup never targets a NIC we didn't make.
            self.tags['_own_nic_'] = network_interface_name
        except Exception as exc:
            self.fail("Error creating network interface {0} - {1}".format(network_interface_name, str(exc)))
        return new_nic

    def parse_network_interface(self, nic):
        '''Normalize a NIC reference (name/dict/ID) to a full resource ID string.'''
        nic = self.parse_resource_to_dict(nic)
        if 'name' not in nic:
            self.fail("Invalid network interface {0}".format(str(nic)))
        return format_resource_id(val=nic['name'],
                                  subscription_id=nic['subscription_id'],
                                  resource_group=nic['resource_group'],
                                  namespace='Microsoft.Network',
                                  types='networkInterfaces')


def main():
    # Instantiating the module class runs the whole module.
    AzureRMVirtualMachine()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine_info.py
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine_info.py new file mode 100644 index 000000000..50d909c92 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine_info.py @@ -0,0 +1,543 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 +# Gustavo Muniz do Carmo +# Zim Kalinowski +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachine_info + +version_added: "0.1.2" + +short_description: Get virtual machine facts + +description: + - Get facts for one or all virtual machines in a resource group. + +options: + resource_group: + description: + - Name of the resource group containing the virtual machines (required when filtering by vm name). + name: + description: + - Name of the virtual machine. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Gustavo Muniz do Carmo (@gustavomcarmo) + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get facts for all virtual machines of a resource group + azure_rm_virtualmachine_info: + resource_group: myResourceGroup + + - name: Get facts by name + azure_rm_virtualmachine_info: + resource_group: myResourceGroup + name: myVm + + - name: Get facts by tags + azure_rm_virtualmachine_info: + resource_group: myResourceGroup + tags: + - testing + - foo:bar +''' + +RETURN = ''' +vms: + description: + - List of virtual machines. + returned: always + type: complex + contains: + admin_username: + description: + - Administrator user name. + returned: always + type: str + sample: admin + boot_diagnostics: + description: + - Information about the boot diagnostics settings. 
+ returned: always + type: complex + contains: + enabled: + description: + - Indicates if boot diagnostics are enabled. + returned: always + type: bool + sample: true + storage_uri: + description: + - Indicates the storage account used by boot diagnostics. + returned: always + type: str + sample: https://mystorageaccountname.blob.core.windows.net/ + console_screenshot_uri: + description: + - Contains a URI to grab a console screenshot. + - Only present if enabled. + returned: always + type: str + sample: https://mystorageaccountname.blob.core.windows.net/bootdiagnostics-myvm01-a4db09a6-ab7f-4d80-9da8-fbceaef9288a/ + myVm.a4db09a6-ab7f-4d80-9da8-fbceaef9288a.screenshot.bmp + serial_console_log_uri: + description: + - Contains a URI to grab the serial console log. + - Only present if enabled. + returned: always + type: str + sample: https://mystorageaccountname.blob.core.windows.net/bootdiagnostics-myvm01-a4db09a6-ab7f-4d80-9da8-fbceaef9288a/ + myVm.a4db09a6-ab7f-4d80-9da8-fbceaef9288a.serialconsole.log + data_disks: + description: + - List of attached data disks. + returned: always + type: complex + contains: + caching: + description: + - Type of data disk caching. + returned: always + type: str + sample: ReadOnly + disk_size_gb: + description: + - The initial disk size in GB for blank data disks. + returned: always + type: int + sample: 64 + lun: + description: + - The logical unit number for data disk. + returned: always + type: int + sample: 0 + managed_disk_type: + description: + - Managed data disk type. + returned: always + type: str + sample: Standard_LRS + managed_disk_id: + description: + - Managed data disk ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/Microsoft.Compute/disks/diskName + id: + description: + - Resource ID. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVm + image: + description: + - Image specification. + returned: always + type: complex + contains: + offer: + description: + - The offer of the platform image or marketplace image used to create the virtual machine. + type: str + returned: when created from marketplace image + sample: RHEL + publisher: + description: + - Publisher name. + type: str + returned: when created from marketplace image + sample: RedHat + sku: + description: + - SKU name. + type: str + returned: when created from marketplace image + sample: 7-RAW + version: + description: + - Image version. + type: str + returned: when created from marketplace image + sample: 7.5.2018050901 + id: + description: + - Custom image resource ID. + type: str + returned: when created from custom image + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/myImage + location: + description: + - Resource location. + returned: always + type: str + sample: japaneast + name: + description: + - Resource name. + returned: always + type: str + sample: myVm + network_interface_names: + description: + - List of attached network interfaces. + returned: always + type: list + sample: [ + "myNetworkInterface" + ] + proximityPlacementGroup: + description: + - The name or ID of the proximity placement group the VM should be associated with. + type: dict + returned: always + sample: { "id": "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/proximityPlacementGroups/testid13"} + os_disk_caching: + description: + - Type of OS disk caching. + returned: always + type: str + sample: ReadOnly + os_type: + description: + - Base type of operating system. + returned: always + type: str + sample: Linux + resource_group: + description: + - Resource group. 
+ returned: always + type: str + sample: myResourceGroup + state: + description: + - State of the resource. + returned: always + type: str + sample: present + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "key1":"value1" } + vm_size: + description: + - Virtual machine size. + returned: always + type: str + sample: Standard_D4 + zones: + description: + - A list of Availability Zones for your VM. + type: list + sample: [1] + power_state: + description: + - Power state of the virtual machine. + returned: always + type: str + sample: running + display_status: + description: + - The short localizable label for the status. + returned: always + type: str + sample: "VM running" + provisioning_state: + description: + - The provisioning state, which only appears in the response. + returned: always + type: str + sample: running + security_profile: + description: + - Specifies the Security related profile settings for the virtual machine. + type: complex + returned: always + contains: + encryption_at_host: + description: + - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine. + - This will enable the encryption for all the disks including Resource/Temp disk at host itself. + type: bool + returned: always + sample: True + security_type: + description: + - Specifies the SecurityType of the virtual machine. + - It is set as TrustedLaunch to enable UefiSettings. + type: str + returned: always + sample: TrustedLaunch + uefi_settings: + description: + - Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + type: complex + returned: always + contains: + secure_boot_enabled: + description: + - Specifies whether secure boot should be enabled on the virtual machine. + type: bool + returned: always + sample: True + v_tpm_enabled: + description: + - Specifies whether vTPM should be enabled on the virtual machine. 
+ type: bool + returned: always + sample: True +''' + +try: + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +from ansible.module_utils.six.moves.urllib.parse import urlparse +import re + + +AZURE_OBJECT_CLASS = 'VirtualMachine' + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] + + +class AzureRMVirtualMachineInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str'), + name=dict(type='str'), + tags=dict(type='list', elements='str') + ) + + self.results = dict( + changed=False, + vms=[] + ) + + self.resource_group = None + self.name = None + self.tags = None + + super(AzureRMVirtualMachineInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_virtualmachine_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_virtualmachine_facts' module has been renamed to 'azure_rm_virtualmachine_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + if self.name: + self.results['vms'] = self.get_item() + elif self.resource_group: + self.results['vms'] = self.list_items_by_resourcegroup() + else: + self.results['vms'] = self.list_all_items() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + result = [] + + item = self.get_vm(self.resource_group, self.name) + + if item and self.has_tags(item.get('tags'), self.tags): + result = [item] + + return result + + def list_items_by_resourcegroup(self): + 
self.log('List all items') + try: + items = self.compute_client.virtual_machines.list(self.resource_group) + except ResourceNotFoundError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in items: + if self.has_tags(item.tags, self.tags): + results.append(self.get_vm(self.resource_group, item.name)) + return results + + def list_all_items(self): + self.log('List all items') + try: + items = self.compute_client.virtual_machines.list_all() + except ResourceNotFoundError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in items: + if self.has_tags(item.tags, self.tags): + results.append(self.get_vm(parse_resource_id(item.id).get('resource_group'), item.name)) + return results + + def get_vm(self, resource_group, name): + ''' + Get the VM with expanded instanceView + + :return: VirtualMachine object + ''' + try: + vm = self.compute_client.virtual_machines.get(resource_group, name, expand='instanceview') + return self.serialize_vm(vm) + except ResourceNotFoundError as exc: + self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc))) + + def serialize_vm(self, vm): + ''' + Convert a VirtualMachine object to dict. 
+ + :param vm: VirtualMachine object + :return: dict + ''' + + result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) + resource_group = parse_resource_id(result['id']).get('resource_group') + instance = None + power_state = None + display_status = None + + try: + instance = self.compute_client.virtual_machines.instance_view(resource_group, vm.name) + instance = self.serialize_obj(instance, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) + except Exception as exc: + self.fail("Error getting virtual machine {0} instance view - {1}".format(vm.name, str(exc))) + + for index in range(len(instance['statuses'])): + code = instance['statuses'][index]['code'].split('/') + if code[0] == 'PowerState': + power_state = code[1] + display_status = instance['statuses'][index]['displayStatus'] + elif code[0] == 'OSState' and code[1] == 'generalized': + display_status = instance['statuses'][index]['displayStatus'] + power_state = 'generalized' + break + elif code[0] == 'ProvisioningState' and code[1] == 'failed': + display_status = instance['statuses'][index]['displayStatus'] + power_state = '' + break + + new_result = {} + + if vm.security_profile is not None: + new_result['security_profile'] = dict() + new_result['security_profile']['encryption_at_host'] = vm.security_profile.encryption_at_host + new_result['security_profile']['security_type'] = vm.security_profile.security_type + new_result['security_profile']['uefi_settings'] = dict() + new_result['security_profile']['uefi_settings']['secure_boot_enabled'] = vm.security_profile.uefi_settings.secure_boot_enabled + new_result['security_profile']['uefi_settings']['v_tpm_enabled'] = vm.security_profile.uefi_settings.v_tpm_enabled + + new_result['power_state'] = power_state + new_result['display_status'] = display_status + new_result['provisioning_state'] = vm.provisioning_state + new_result['id'] = vm.id + new_result['resource_group'] = resource_group + new_result['name'] = vm.name + 
new_result['state'] = 'present' + new_result['location'] = vm.location + new_result['vm_size'] = result['properties']['hardwareProfile']['vmSize'] + new_result['proximityPlacementGroup'] = result['properties'].get('proximityPlacementGroup') + new_result['zones'] = result.get('zones', None) + os_profile = result['properties'].get('osProfile') + if os_profile is not None: + new_result['admin_username'] = os_profile.get('adminUsername') + image = result['properties']['storageProfile'].get('imageReference') + if image is not None: + if image.get('publisher', None) is not None: + new_result['image'] = { + 'publisher': image['publisher'], + 'sku': image['sku'], + 'offer': image['offer'], + 'version': image['version'] + } + else: + new_result['image'] = { + 'id': image.get('id', None) + } + + new_result['boot_diagnostics'] = { + 'enabled': 'diagnosticsProfile' in result['properties'] and + 'bootDiagnostics' in result['properties']['diagnosticsProfile'] and + result['properties']['diagnosticsProfile']['bootDiagnostics']['enabled'] or False, + 'storage_uri': 'diagnosticsProfile' in result['properties'] and + 'bootDiagnostics' in result['properties']['diagnosticsProfile'] and + result['properties']['diagnosticsProfile']['bootDiagnostics'].get('storageUri', None) + } + if new_result['boot_diagnostics']['enabled']: + new_result['boot_diagnostics']['console_screenshot_uri'] = result['properties']['instanceView']['bootDiagnostics'].get('consoleScreenshotBlobUri') + new_result['boot_diagnostics']['serial_console_log_uri'] = result['properties']['instanceView']['bootDiagnostics'].get('serialConsoleLogBlobUri') + + vhd = result['properties']['storageProfile']['osDisk'].get('vhd') + if vhd is not None: + url = urlparse(vhd['uri']) + new_result['storage_account_name'] = url.netloc.split('.')[0] + new_result['storage_container_name'] = url.path.split('/')[1] + new_result['storage_blob_name'] = url.path.split('/')[-1] + + new_result['os_disk_caching'] = 
result['properties']['storageProfile']['osDisk']['caching'] + new_result['os_type'] = result['properties']['storageProfile']['osDisk']['osType'] + new_result['data_disks'] = [] + disks = result['properties']['storageProfile']['dataDisks'] + for disk_index in range(len(disks)): + new_result['data_disks'].append({ + 'lun': disks[disk_index].get('lun'), + 'name': disks[disk_index].get('name'), + 'disk_size_gb': disks[disk_index].get('diskSizeGB'), + 'managed_disk_type': disks[disk_index].get('managedDisk', {}).get('storageAccountType'), + 'managed_disk_id': disks[disk_index].get('managedDisk', {}).get('id'), + 'caching': disks[disk_index].get('caching') + }) + + new_result['network_interface_names'] = [] + nics = result['properties']['networkProfile']['networkInterfaces'] + for nic_index in range(len(nics)): + new_result['network_interface_names'].append(re.sub('.*networkInterfaces/', '', nics[nic_index]['id'])) + + new_result['tags'] = vm.tags + return new_result + + +def main(): + AzureRMVirtualMachineInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension.py new file mode 100644 index 000000000..0382aceb9 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension.py @@ -0,0 +1,358 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 Sertac Ozercan +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachineextension + +version_added: "0.1.2" + +short_description: Managed Azure Virtual Machine extension + +description: + - Create, update and delete Azure Virtual Machine Extension. 
+ - Note that this module was called M(azure.azcollection.azure_rm_virtualmachine_extension) before Ansible 2.8. The usage did not change. + +options: + resource_group: + description: + - Name of a resource group where the vm extension exists or will be created. + required: true + type: str + name: + description: + - Name of the vm extension. + required: true + type: str + state: + description: + - State of the vm extension. Use C(present) to create or update a vm extension and C(absent) to delete a vm extension. + default: present + type: str + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + type: str + virtual_machine_name: + description: + - The name of the virtual machine where the extension should be create or updated. + required: true + type: str + publisher: + description: + - The name of the extension handler publisher. + type: str + virtual_machine_extension_type: + description: + - The type of the extension handler. + type: str + type_handler_version: + description: + - The type version of the extension handler. + type: str + settings: + description: + - JSON formatted public settings for the extension. + type: dict + protected_settings: + description: + - JSON formatted protected settings for the extension. + - >- + Previously configured settings are not available, so the parameter is not used for idempotency checks. + If changes to this parameter need to be applied, use in conjunction with I(force_update_tag). + type: dict + auto_upgrade_minor_version: + description: + - Whether the extension handler should be automatically upgraded across minor versions. + type: bool + force_update_tag: + description: + - Whether the extension should be updated or re-run even if no changes can be detected from what is currently configured. + - Helpful when applying changes to I(protected_settings). 
+ type: bool + default: false + version_added: '1.10.0' + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Sertac Ozercan (@sozercan) + - Julien Stroheker (@julienstroheker) +''' + +EXAMPLES = ''' +- name: Create VM Extension + azure_rm_virtualmachineextension: + name: myvmextension + location: eastus + resource_group: myResourceGroup + virtual_machine_name: myvm + publisher: Microsoft.Azure.Extensions + virtual_machine_extension_type: CustomScript + type_handler_version: 2.0 + settings: '{"commandToExecute": "hostname"}' + auto_upgrade_minor_version: true + +- name: Delete VM Extension + azure_rm_virtualmachineextension: + name: myvmextension + resource_group: myResourceGroup + virtual_machine_name: myvm + state: absent +''' + +RETURN = ''' +state: + description: + - Current state of the vm extension. + returned: always + type: dict + sample: { "state":"Deleted" } + +changed: + description: + - Whether or not the resource has changed. + returned: always + type: bool + sample: true +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +def vmextension_to_dict(extension): + ''' + Serializing the VM Extension from the API to Dict + :return: dict + ''' + return dict( + id=extension.id, + name=extension.name, + location=extension.location, + publisher=extension.publisher, + virtual_machine_extension_type=extension.type_properties_type, + type_handler_version=extension.type_handler_version, + auto_upgrade_minor_version=extension.auto_upgrade_minor_version, + settings=extension.settings, + protected_settings=extension.protected_settings, + ) + + +class AzureRMVMExtension(AzureRMModuleBase): + """Configuration class for an Azure RM VM Extension resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + 
required=True + ), + name=dict( + type='str', + required=True + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ), + location=dict( + type='str' + ), + virtual_machine_name=dict( + type='str', + required=True + ), + publisher=dict( + type='str' + ), + virtual_machine_extension_type=dict( + type='str' + ), + type_handler_version=dict( + type='str' + ), + auto_upgrade_minor_version=dict( + type='bool' + ), + settings=dict( + type='dict' + ), + protected_settings=dict( + type='dict', no_log=True + ), + force_update_tag=dict( + type='bool', + default=False + ), + ) + + self.resource_group = None + self.name = None + self.virtual_machine_name = None + self.location = None + self.publisher = None + self.virtual_machine_extension_type = None + self.type_handler_version = None + self.auto_upgrade_minor_version = None + self.settings = None + self.protected_settings = None + self.state = None + self.force_update_tag = False + + required_if = [ + ('state', 'present', ['publisher', 'virtual_machine_extension_type', 'type_handler_version']), + ] + + self.results = dict(changed=False, state=dict()) + + super(AzureRMVMExtension, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=False, + supports_tags=False, + required_if=required_if) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + if self.module._name == 'azure_rm_virtualmachine_extension': + self.module.deprecate("The 'azure_rm_virtualmachine_extension' module has been renamed to 'azure_rm_virtualmachineextension'", version=(2, 9)) + + resource_group = None + to_be_updated = False + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + response = self.get_vmextension() + + if self.state == 'present': + if not response: + to_be_updated = True + else: + if 
self.force_update_tag: + to_be_updated = True + + if self.settings is not None: + if response['settings'] != self.settings: + response['settings'] = self.settings + to_be_updated = True + else: + self.settings = response['settings'] + + if response['location'] != self.location: + self.location = response['location'] + self.module.warn("Property 'location' cannot be changed") + + if response['publisher'] != self.publisher: + self.publisher = response['publisher'] + self.module.warn("Property 'publisher' cannot be changed") + + if response['virtual_machine_extension_type'] != self.virtual_machine_extension_type: + self.virtual_machine_extension_type = response['virtual_machine_extension_type'] + self.module.warn("Property 'virtual_machine_extension_type' cannot be changed") + + if response['type_handler_version'] != self.type_handler_version: + response['type_handler_version'] = self.type_handler_version + to_be_updated = True + + if self.auto_upgrade_minor_version is not None: + if response['auto_upgrade_minor_version'] != self.auto_upgrade_minor_version: + response['auto_upgrade_minor_version'] = self.auto_upgrade_minor_version + to_be_updated = True + else: + self.auto_upgrade_minor_version = response['auto_upgrade_minor_version'] + + if to_be_updated: + self.results['changed'] = True + self.results['state'] = self.create_or_update_vmextension() + elif self.state == 'absent': + if response: + self.delete_vmextension() + self.results['changed'] = True + + return self.results + + def create_or_update_vmextension(self): + ''' + Method calling the Azure SDK to create or update the VM extension. 
+ :return: void + ''' + self.log("Creating VM extension {0}".format(self.name)) + try: + params = self.compute_models.VirtualMachineExtension( + location=self.location, + publisher=self.publisher, + type_properties_type=self.virtual_machine_extension_type, + type_handler_version=self.type_handler_version, + auto_upgrade_minor_version=self.auto_upgrade_minor_version, + settings=self.settings, + protected_settings=self.protected_settings, + force_update_tag=self.force_update_tag, + ) + poller = self.compute_client.virtual_machine_extensions.begin_create_or_update(self.resource_group, self.virtual_machine_name, self.name, params) + response = self.get_poller_result(poller) + return vmextension_to_dict(response) + + except Exception as e: + self.log('Error attempting to create the VM extension.') + self.fail("Error creating the VM extension: {0}".format(str(e))) + + def delete_vmextension(self): + ''' + Method calling the Azure SDK to delete the VM Extension. + :return: void + ''' + self.log("Deleting vmextension {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machine_extensions.begin_delete(self.resource_group, self.virtual_machine_name, self.name) + self.get_poller_result(poller) + except Exception as e: + self.log('Error attempting to delete the vmextension.') + self.fail("Error deleting the vmextension: {0}".format(str(e))) + + def get_vmextension(self): + ''' + Method calling the Azure SDK to get a VM Extension. 
+ :return: void + ''' + self.log("Checking if the vm extension {0} is present".format(self.name)) + found = False + try: + response = self.compute_client.virtual_machine_extensions.get(self.resource_group, self.virtual_machine_name, self.name) + found = True + except ResourceNotFoundError as e: + self.log('Did not find vm extension') + if found: + return vmextension_to_dict(response) + else: + return False + + +def main(): + """Main execution""" + AzureRMVMExtension() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension_info.py new file mode 100644 index 000000000..98f748c2f --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineextension_info.py @@ -0,0 +1,249 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachineextension_info +version_added: "0.1.2" +short_description: Get Azure Virtual Machine Extension facts +description: + - Get facts of Azure Virtual Machine Extension. + +options: + resource_group: + description: + - The name of the resource group. + required: True + type: str + virtual_machine_name: + description: + - The name of the virtual machine containing the extension. + required: True + type: str + name: + description: + - The name of the virtual machine extension. + type: str + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get information on specific Virtual Machine Extension + azure_rm_virtualmachineextension_info: + resource_group: myResourceGroup + virtual_machine_name: myvm + name: myextension + + - name: List installed Virtual Machine Extensions + azure_rm_virtualmachineextension_info: + resource_group: myResourceGroup + virtual_machine_name: myvm +''' + +RETURN = ''' +extensions: + description: + - A list of dictionaries containing facts for Virtual Machine Extension. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/myvm/testVM/extens + ions/myextension" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myResourceGroup + virtual_machine_name: + description: + - Virtual machine name. + returned: always + type: str + sample: myvm + name: + description: + - Virtual machine name. + returned: always + type: str + sample: myextension + location: + description: + - The resource location. + returned: always + type: str + sample: eastus + publisher: + description: + - Extension publisher. + returned: always + type: str + sample: Microsoft.Azure.Extensions + type: + description: + - Extension type. + returned: always + type: str + sample: CustomScript + settings: + description: + - Extension specific settings dictionary. + returned: always + type: dict + sample: { 'commandToExecute':'hostname' } + auto_upgrade_minor_version: + description: + - Autoupgrade minor version flag. + returned: always + type: bool + sample: true + tags: + description: + - Resource tags. 
+ returned: always + type: dict + sample: { "mytag":"abc" } + provisioning_state: + description: + - Provisioning state of the extension. + returned: always + type: str + sample: Succeeded +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from msrest.serialization import Model +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMVirtualMachineExtensionInfo(AzureRMModuleBase): + def __init__(self): + # define user inputs into argument + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + virtual_machine_name=dict( + type='str', + required=True + ), + name=dict( + type='str' + ), + tags=dict( + type='list', + elements='str' + ) + ) + # store the results of the module operation + self.results = dict( + changed=False + ) + self.resource_group = None + self.virtual_machine_name = None + self.name = None + self.tags = None + super(AzureRMVirtualMachineExtensionInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_virtualmachineextension_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_virtualmachineextension_facts' module has been renamed to 'azure_rm_virtualmachineextension_info'", + version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['extensions'] = self.get_extensions() + else: + self.results['extensions'] = self.list_extensions() + + return self.results + + def get_extensions(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_extensions.get(resource_group_name=self.resource_group, + vm_name=self.virtual_machine_name, + vm_extension_name=self.name) + self.log("Response : {0}".format(response)) 
+ except ResourceNotFoundError as e: + self.log('Could not get facts for Virtual Machine Extension.') + + if response and self.has_tags(response.tags, self.tags): + results.append(self.format_response(response)) + + return results + + def list_extensions(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_extensions.list(resource_group_name=self.resource_group, + vm_name=self.virtual_machine_name) + self.log("Response : {0}".format(response)) + except ResourceNotFoundError as e: + self.log('Could not get facts for Virtual Machine Extension.') + + if response is not None and response.value is not None: + for item in response.value: + if self.has_tags(item.tags, self.tags): + results.append(self.format_response(item)) + + return results + + def format_response(self, item): + d = item.as_dict() + d = { + 'id': d.get('id', None), + 'resource_group': self.resource_group, + 'virtual_machine_name': self.virtual_machine_name, + 'location': d.get('location'), + 'name': d.get('name'), + 'publisher': d.get('publisher'), + 'type': d.get('type_properties_type'), + 'settings': d.get('settings'), + 'auto_upgrade_minor_version': d.get('auto_upgrade_minor_version'), + 'tags': d.get('tags', None), + 'provisioning_state': d.get('provisioning_state') + } + return d + + +def main(): + AzureRMVirtualMachineExtensionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineimage_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineimage_info.py new file mode 100644 index 000000000..fc18f2dd2 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachineimage_info.py @@ -0,0 +1,256 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachineimage_info + +version_added: "0.1.2" + +short_description: Get virtual machine image facts + +description: + - Get facts for virtual machine images. + +options: + location: + description: + - Azure location value, for example C(westus), C(eastus), C(eastus2), C(northcentralus), etc. + - Supplying only a location value will yield a list of available publishers for the location. + required: true + publisher: + description: + - Name of an image publisher. List image offerings associated with a particular publisher. + offer: + description: + - Name of an image offering. Combine with SKU to see a list of available image versions. + sku: + description: + - Image offering SKU. Combine with offer to see a list of available versions. + version: + description: + - Specific version number of an image. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Get facts for a specific image + azure_rm_virtualmachineimage_info: + location: eastus + publisher: OpenLogic + offer: CentOS + sku: '7.1' + version: '7.1.20160308' + + - name: List available versions + azure_rm_virtualmachineimage_info: + location: eastus + publisher: OpenLogic + offer: CentOS + sku: '7.1' + + - name: List available offers + azure_rm_virtualmachineimage_info: + location: eastus + publisher: OpenLogic + + - name: List available publishers + azure_rm_virtualmachineimage_info: + location: eastus + +''' + +RETURN = ''' +azure_vmimages: + description: + - List of image dicts. 
+ returned: always + type: list + example: [ { + "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ + Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150410", + "location": "eastus", + "name": "7.1.20150410" + }, + { + "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ + Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150605", + "location": "eastus", + "name": "7.1.20150605" + }, + { + "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ + Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150731", + "location": "eastus", + "name": "7.1.20150731" + }, + { + "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/ + Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20160308", + "location": "eastus", + "name": "7.1.20160308" + } + ] +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] + + +class AzureRMVirtualMachineImageInfo(AzureRMModuleBase): + + def __init__(self, **kwargs): + + self.module_arg_spec = dict( + location=dict(type='str', required=True), + publisher=dict(type='str'), + offer=dict(type='str'), + sku=dict(type='str'), + version=dict(type='str') + ) + + self.results = dict( + changed=False, + ) + + self.location = None + self.publisher = None + self.offer = None + self.sku = None + self.version = None + + super(AzureRMVirtualMachineImageInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_virtualmachineimage_facts' + if is_old_facts: + self.module.deprecate("The 
'azure_rm_virtualmachineimage_facts' module has been renamed to 'azure_rm_virtualmachineimage_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if is_old_facts: + self.results['ansible_facts'] = dict() + if self.location and self.publisher and self.offer and self.sku and self.version: + self.results['ansible_facts']['azure_vmimages'] = self.get_item() + elif self.location and self.publisher and self.offer and self.sku: + self.results['ansible_facts']['azure_vmimages'] = self.list_images() + elif self.location and self.publisher: + self.results['ansible_facts']['azure_vmimages'] = self.list_offers() + elif self.location: + self.results['ansible_facts']['azure_vmimages'] = self.list_publishers() + else: + if self.location and self.publisher and self.offer and self.sku and self.version: + self.results['vmimages'] = self.get_item() + elif self.location and self.publisher and self.offer and self.sku: + self.results['vmimages'] = self.list_images() + elif self.location and self.publisher: + self.results['vmimages'] = self.list_offers() + elif self.location: + self.results['vmimages'] = self.list_publishers() + + return self.results + + def get_item(self): + item = None + result = [] + versions = None + + try: + versions = self.compute_client.virtual_machine_images.list(self.location, + self.publisher, + self.offer, + self.sku, + top=1, + orderby='name desc') + except ResourceNotFoundError: + pass + + if self.version == 'latest': + item = versions[-1] + else: + for version in versions: + if version.name == self.version: + item = version + + if item: + result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)] + + return result + + def list_images(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_images.list(self.location, + self.publisher, + self.offer, + self.sku,) + except ResourceNotFoundError as exc: + self.fail("Failed to list images: 
{0}".format(str(exc))) + + if response: + for item in response: + results.append(self.serialize_obj(item, 'VirtualMachineImageResource', + enum_modules=AZURE_ENUM_MODULES)) + return results + + def list_offers(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_images.list_offers(self.location, + self.publisher) + except ResourceNotFoundError as exc: + self.fail("Failed to list offers: {0}".format(str(exc))) + + if response: + for item in response: + results.append(self.serialize_obj(item, 'VirtualMachineImageResource', + enum_modules=AZURE_ENUM_MODULES)) + return results + + def list_publishers(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_images.list_publishers(self.location) + except ResourceNotFoundError as exc: + self.fail("Failed to list publishers: {0}".format(str(exc))) + + if response: + for item in response: + results.append(self.serialize_obj(item, 'VirtualMachineImageResource', + enum_modules=AZURE_ENUM_MODULES)) + return results + + +def main(): + AzureRMVirtualMachineImageInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset.py new file mode 100644 index 000000000..ee3b822b5 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset.py @@ -0,0 +1,1489 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Sertac Ozercan, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachinescaleset + +version_added: "0.1.2" + +short_description: Manage Azure virtual machine scale sets + +description: + - Create and update a virtual machine scale set. 
+ - Note that this module was called M(azure.azcollection.azure_rm_virtualmachine_scaleset) before Ansible 2.8. The usage did not change. + +options: + resource_group: + description: + - Name of the resource group containing the virtual machine scale set. + required: true + name: + description: + - Name of the virtual machine. + required: true + state: + description: + - Assert the state of the virtual machine scale set. + - State C(present) will check that the machine exists with the requested configuration. If the configuration + of the existing machine does not match, the machine will be updated. + - State C(absent) will remove the virtual machine scale set. + default: present + choices: + - absent + - present + location: + description: + - Valid Azure location. Defaults to location of the resource group. + short_hostname: + description: + - Short host name. + vm_size: + description: + - A valid Azure VM size value. For example, C(Standard_D4). + - The list of choices varies depending on the subscription and location. Check your subscription for available choices. + capacity: + description: + - Capacity of VMSS. + default: 1 + tier: + description: + - SKU Tier. + choices: + - Basic + - Standard + upgrade_policy: + description: + - Upgrade policy. + - Required when creating the Azure virtual machine scale sets. + choices: + - Manual + - Automatic + priority: + description: + - Priority of the VMSS. + - C(None) is the equivalent of Regular VM. + choices: + - None + - Spot + eviction_policy: + description: + - Specifies the eviction policy for the Azure Spot virtual machine. + - Requires priority to be set to Spot. + choices: + - Deallocate + - Delete + max_price: + description: + - Specifies the maximum price you are willing to pay for a Azure Spot VM/VMSS. + - This price is in US Dollars. + - C(-1) indicates default price to be up-to on-demand. + - Requires priority to be set to Spot. 
+ default: -1 + admin_username: + description: + - Admin username used to access the host after it is created. Required when creating a VM. + admin_password: + description: + - Password for the admin username. + - Not required if the os_type is Linux and SSH password authentication is disabled by setting I(ssh_password_enabled=false). + ssh_password_enabled: + description: + - When the os_type is Linux, setting I(ssh_password_enabled=false) will disable SSH password authentication and require use of SSH keys. + type: bool + default: true + ssh_public_keys: + description: + - For I(os_type=Linux) provide a list of SSH keys. + - Each item in the list should be a dictionary where the dictionary contains two keys, C(path) and C(key_data). + - Set the C(path) to the default location of the authorized_keys files. + - On an Enterprise Linux host, for example, the I(path=/home//.ssh/authorized_keys). + Set C(key_data) to the actual value of the public key. + image: + description: + - Specifies the image used to build the VM. + - If a string, the image is sourced from a custom image based on the name. + - If a dict with the keys I(publisher), I(offer), I(sku), and I(version), the image is sourced from a Marketplace image. + Note that set I(version=latest) to get the most recent version of a given image. + - If a dict with the keys I(name) and I(resource_group), the image is sourced from a custom image based on the I(name) and I(resource_group) set. + Note that the key I(resource_group) is optional and if omitted, all images in the subscription will be searched for by I(name). + - Custom image support was added in Ansible 2.5. + required: true + os_disk_caching: + description: + - Type of OS disk caching. + choices: + - ReadOnly + - ReadWrite + default: ReadOnly + aliases: + - disk_caching + os_disk_size_gb: + description: + - Specifies the size of the operating system disk in gigabytes. + - This can be used to overwrite the size of the disk in a virtual machine image. 
+ type: int + os_type: + description: + - Base type of operating system. + choices: + - Windows + - Linux + default: Linux + ephemeral_os_disk: + description: + - Parameters of ephemeral disk settings that can be specified for operating system disk. + - Ephemeral OS disk is only supported for VMS Instances using Managed Disk. + type: bool + managed_disk_type: + description: + - Managed disk type. + choices: + - Standard_LRS + - Premium_LRS + - StandardSSD_LRS + - UltraSSD_LRS + - Premium_ZRS + - StandardSSD_ZRS + data_disks: + description: + - Describes list of data disks. + suboptions: + lun: + description: + - The logical unit number for data disk. + default: 0 + disk_size_gb: + description: + - The initial disk size in GB for blank data disks. + managed_disk_type: + description: + - Managed data disk type. + choices: + - Standard_LRS + - Premium_LRS + - StandardSSD_LRS + - UltraSSD_LRS + - Premium_ZRS + - StandardSSD_ZRS + caching: + description: + - Type of data disk caching. + choices: + - ReadOnly + - ReadWrite + default: ReadOnly + virtual_network_resource_group: + description: + - When creating a virtual machine, if a specific virtual network from another resource group should be + used. + - Use this parameter to specify the resource group to use. + virtual_network_name: + description: + - Virtual Network name. + aliases: + - virtual_network + subnet_name: + description: + - Subnet name. + aliases: + - subnet + public_ip_per_vm: + description: + - Assign a public IP to each virtual machine of the scale set + type: bool + default: False + load_balancer: + description: + - Load balancer name. + application_gateway: + description: + - Application gateway name. + remove_on_absent: + description: + - When removing a VM using I(state=absent), also remove associated resources. + - It can be C(all) or a list with any of the following ['network_interfaces', 'virtual_storage', 'public_ips']. + - Any other input will be ignored. 
+ default: ['all'] + enable_accelerated_networking: + description: + - Indicates whether user wants to allow accelerated networking for virtual machines in scaleset being created. + type: bool + security_group: + description: + - Existing security group with which to associate the subnet. + - It can be the security group name which is in the same resource group. + - It can be the resource ID. + - It can be a dict which contains I(name) and I(resource_group) of the security group. + aliases: + - security_group_name + overprovision: + description: + - Specifies whether the Virtual Machine Scale Set should be overprovisioned. + type: bool + single_placement_group: + description: + - When true this limits the scale set to a single placement group, of max size 100 virtual machines. + type: bool + default: False + plan: + description: + - Third-party billing plan for the VM. + type: dict + suboptions: + name: + description: + - Billing plan name. + required: true + product: + description: + - Product name. + required: true + publisher: + description: + - Publisher offering the plan. + required: true + promotion_code: + description: + - Optional promotion code. + zones: + description: + - A list of Availability Zones for your virtual machine scale set. + type: list + custom_data: + description: + - Data which is made available to the virtual machine and used by e.g., C(cloud-init). + - Many images in the marketplace are not cloud-init ready. Thus, data sent to I(custom_data) would be ignored. + - If the image you are attempting to use is not listed in + U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview), + follow these steps U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image). 
+ scale_in_policy: + description: + - define the order in which vmss instances are scaled-in + choices: + - Default + - NewestVM + - OldestVM + terminate_event_timeout_minutes: + description: + - timeout time for termination notification event + - in range between 5 and 15 + platform_fault_domain_count: + description: + - Fault Domain count for each placement group. + type: int + default: 1 + orchestration_mode: + description: + - Specifies the orchestration mode for the virtual machine scale set. + - When I(orchestration_mode=Flexible), I(public_ip_per_vm=True) must be set. + - When I(orchestration_mode=Flexible), I(platform_fault_domain_count) must be set. + - When I(orchestration_mode=Flexible), I(single_placement_group=False) must be set. + - When I(orchestration_mode=Flexible), it cannot be configured I(overprovision). + - When I(orchestration_mode=Flexible), it cannot be configured I(upgrade_policy) and configured when I(orchestration_mode=Uniform). + type: str + choices: + - Flexible + - Uniform + security_profile: + description: + - Specifies the Security related profile settings for the virtual machine sclaset. + type: dict + suboptions: + encryption_at_host: + description: + - This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine sclaset. + - This will enable the encryption for all the disks including Resource/Temp disk at host itself. + type: bool + security_type: + description: + - Specifies the SecurityType of the virtual machine sclaset. + - It is set as TrustedLaunch to enable UefiSettings. + type: str + choices: + - TrustedLaunch + uefi_settings: + description: + - Specifies the security settings like secure boot and vTPM used while creating the virtual machine scalset. + type: dict + suboptions: + secure_boot_enabled: + description: + - Specifies whether secure boot should be enabled on the virtual machine sclaset. 
+ type: bool + v_tpm_enabled: + description: + - Specifies whether vTPM should be enabled on the virtual machine scalset. + type: bool + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Sertac Ozercan (@sozercan) + +''' +EXAMPLES = ''' + +- name: Create VMSS + azure_rm_virtualmachinescaleset: + resource_group: myResourceGroup + name: testvmss + vm_size: Standard_DS1_v2 + capacity: 2 + virtual_network_name: testvnet + upgrade_policy: Manual + subnet_name: testsubnet + terminate_event_timeout_minutes: 10 + scale_in_policy: NewestVM + admin_username: "{{ username }}" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminUser/.ssh/authorized_keys + key_data: < insert your ssh public key here... > + managed_disk_type: Standard_LRS + image: + offer: 0001-com-ubuntu-server-focal + publisher: canonical + sku: 20_04-lts-gen2 + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + +- name: Create VMSS with an image that requires plan information + azure_rm_virtualmachinescaleset: + resource_group: myResourceGroup + name: testvmss + vm_size: Standard_DS1_v2 + capacity: 3 + virtual_network_name: testvnet + upgrade_policy: Manual + subnet_name: testsubnet + admin_username: "{{ username }}" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminUser/.ssh/authorized_keys + key_data: < insert your ssh public key here... 
> + managed_disk_type: Standard_LRS + image: + offer: cis-ubuntu-linux-1804-l1 + publisher: center-for-internet-security-inc + sku: Stable + version: latest + plan: + name: cis-ubuntu-linux-1804-l1 + product: cis-ubuntu-linux-1804-l1 + publisher: center-for-internet-security-inc + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + +- name: Create a VMSS with a custom image + azure_rm_virtualmachinescaleset: + resource_group: myResourceGroup + name: testvmss + vm_size: Standard_DS1_v2 + capacity: 2 + virtual_network_name: testvnet + upgrade_policy: Manual + subnet_name: testsubnet + admin_username: "{{ username }}" + admin_password: "{{ password }}" + managed_disk_type: Standard_LRS + image: customimage001 + +- name: Create a VMSS with over 100 instances + azure_rm_virtualmachinescaleset: + resource_group: myResourceGroup + name: testvmss + vm_size: Standard_DS1_v2 + capacity: 120 + single_placement_group: False + virtual_network_name: testvnet + upgrade_policy: Manual + subnet_name: testsubnet + admin_username: "{{ username }}" + admin_password: "{{ password }}" + managed_disk_type: Standard_LRS + image: customimage001 + +- name: Create a VMSS with a custom image from a particular resource group + azure_rm_virtualmachinescaleset: + resource_group: myResourceGroup + name: testvmss + vm_size: Standard_DS1_v2 + capacity: 2 + virtual_network_name: testvnet + upgrade_policy: Manual + subnet_name: testsubnet + admin_username: "{{ username }}" + admin_password: "{{ password }}" + managed_disk_type: Standard_LRS + image: + name: customimage001 + resource_group: myResourceGroup + +- name: Create a VMSS with Spot Instance + azure_rm_virtualmachinescaleset: + resource_group: myResourceGroup + name: testvmss + vm_size: Standard_DS1_v2 + capacity: 5 + priority: Spot + eviction_policy: Deallocate + virtual_network_name: testvnet + upgrade_policy: Manual + subnet_name: testsubnet + admin_username: "{{ username }}" + admin_password: 
"{{ password }}" + managed_disk_type: Standard_LRS + image: customimage001 + +- name: Create VMSS with security group + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_D4s_v3 + admin_username: testuser + single_placement_group: False + platform_fault_domain_count: 1 + public_ip_per_vm: True + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa ****" + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + managed_disk_type: Standard_LRS + orchestration_mode: Flexible + os_disk_caching: ReadWrite + security_profile: + uefi_settings: + secure_boot_enabled: True + v_tpm_enabled: False + encryption_at_host: False + security_type: TrustedLaunch + image: + offer: 0001-com-ubuntu-server-jammy + publisher: Canonical + sku: 22_04-lts-gen2 + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS +''' + +RETURN = ''' +azure_vmss: + description: + - Facts about the current state of the object. + - Note that facts are not part of the registered output but available directly. 
+ returned: always + type: dict + sample: { + "properties": { + "overprovision": true, + "platformFaultDomainCount": 1, + "orchestrationMode": "Flexible", + "scaleInPolicy": { + "rules": [ + "NewestVM" + ] + }, + "singlePlacementGroup": true, + "upgradePolicy": { + "mode": "Manual" + }, + "virtualMachineProfile": { + "networkProfile": { + "networkInterfaceConfigurations": [ + { + "name": "testvmss", + "properties": { + "dnsSettings": { + "dnsServers": [] + }, + "enableAcceleratedNetworking": false, + "ipConfigurations": [ + { + "name": "default", + "properties": { + "privateIPAddressVersion": "IPv4", + "subnet": { + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet" + } + } + } + ], + "primary": true + } + } + ] + }, + "osProfile": { + "adminUsername": "testuser", + "computerNamePrefix": "testvmss", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "keyData": "", + "path": "/home/testuser/.ssh/authorized_keys" + } + ] + } + }, + "secrets": [] + }, + "scheduledEventsProfile": { + "terminateNotificationProfile": { + "enable": true, + "notBeforeTimeout": "PT10M" + } + }, + "storageProfile": { + "dataDisks": [ + { + "caching": "ReadWrite", + "createOption": "empty", + "diskSizeGB": 64, + "lun": 0, + "managedDisk": { + "storageAccountType": "Standard_LRS" + } + } + ], + "imageReference": { + "offer": "0001-com-ubuntu-server-focal", + "publisher": "canonical", + "sku": "20_04-lts-gen2", + "version": "20.04.202111210" + }, + "securityProfile": { + "encryptionAtHost": false, + "securityType": "TrustedLaunch", + "uefiSettings": { + "secureBootEnabled": true, + "vTpmEnabled": false + } + }, + "osDisk": { + "caching": "ReadWrite", + "createOption": "fromImage", + "managedDisk": { + "storageAccountType": "Standard_LRS" + } + } + } + } + }, + "sku": { + "capacity": 2, + "name": "Standard_DS1_v2", + "tier": "Standard" 
+ }, + "tags": null, + "type": "Microsoft.Compute/virtualMachineScaleSets" + } +''' # NOQA + +import base64 + +try: + from msrestazure.azure_exceptions import CloudError + from azure.core.exceptions import ResourceNotFoundError + from msrestazure.tools import parse_resource_id + from azure.core.exceptions import ResourceNotFoundError + +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, format_resource_id +from ansible.module_utils.basic import to_native, to_bytes + + +AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet' + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] + + +class AzureRMVirtualMachineScaleSet(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(choices=['present', 'absent'], default='present', type='str'), + location=dict(type='str'), + short_hostname=dict(type='str'), + vm_size=dict(type='str'), + tier=dict(type='str', choices=['Basic', 'Standard']), + capacity=dict(type='int', default=1), + upgrade_policy=dict(type='str', choices=['Automatic', 'Manual']), + priority=dict(type='str', choices=['None', 'Spot']), + eviction_policy=dict(type='str', choices=['Deallocate', 'Delete']), + max_price=dict(type='float', default=-1), + admin_username=dict(type='str'), + admin_password=dict(type='str', no_log=True), + ssh_password_enabled=dict(type='bool', default=True), + ssh_public_keys=dict(type='list'), + image=dict(type='raw'), + os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'], + default='ReadOnly'), + os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'), + managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS', 'Premium_ZRS', 'StandardSSD_ZRS']), + 
data_disks=dict(type='list'), + subnet_name=dict(type='str', aliases=['subnet']), + public_ip_per_vm=dict(type='bool', default=False), + load_balancer=dict(type='str'), + application_gateway=dict(type='str'), + virtual_network_resource_group=dict(type='str'), + virtual_network_name=dict(type='str', aliases=['virtual_network']), + remove_on_absent=dict(type='list', default=['all']), + enable_accelerated_networking=dict(type='bool'), + security_group=dict(type='raw', aliases=['security_group_name']), + overprovision=dict(type='bool'), + single_placement_group=dict(type='bool', default=False), + zones=dict(type='list'), + custom_data=dict(type='str'), + plan=dict(type='dict', options=dict(publisher=dict(type='str', required=True), + product=dict(type='str', required=True), name=dict(type='str', required=True), + promotion_code=dict(type='str'))), + scale_in_policy=dict(type='str', choices=['Default', 'OldestVM', 'NewestVM']), + terminate_event_timeout_minutes=dict(type='int'), + ephemeral_os_disk=dict(type='bool'), + orchestration_mode=dict(type='str', choices=['Uniform', 'Flexible']), + platform_fault_domain_count=dict(type='int', default=1), + os_disk_size_gb=dict(type='int'), + security_profile=dict( + type='dict', + options=dict( + encryption_at_host=dict(type='bool'), + security_type=dict(type='str', choices=['TrustedLaunch']), + uefi_settings=dict( + type='dict', + options=dict( + secure_boot_enabled=dict(type='bool'), + v_tpm_enabled=dict(type='bool'), + ) + ) + ) + ), + ) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.short_hostname = None + self.vm_size = None + self.capacity = None + self.tier = None + self.upgrade_policy = None + self.priority = None + self.eviction_policy = None + self.admin_username = None + self.admin_password = None + self.ssh_password_enabled = None + self.ssh_public_keys = None + self.image = None + self.os_disk_caching = None + self.managed_disk_type = None + self.data_disks = None 
+ self.os_type = None + self.subnet_name = None + self.virtual_network_resource_group = None + self.virtual_network_name = None + self.public_ip_per_vm = None + self.tags = None + self.differences = None + self.load_balancer = None + self.application_gateway = None + self.enable_accelerated_networking = None + self.security_group = None + self.overprovision = None + self.zones = None + self.custom_data = None + self.plan = None + self.scale_in_policy = None + self.terminate_event_timeout_minutes = None + self.ephemeral_os_disk = None + self.orchestration_mode = None + self.os_disk_size_gb = None + self.security_profile = None + + mutually_exclusive = [('load_balancer', 'application_gateway')] + self.results = dict( + changed=False, + actions=[], + ansible_facts=dict(azure_vmss=None) + ) + + super(AzureRMVirtualMachineScaleSet, self).__init__( + derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive) + + def exec_module(self, **kwargs): + + for key in list(self.module_arg_spec.keys()) + ['tags']: + setattr(self, key, kwargs[key]) + + if self.module._name == 'azure_rm_virtualmachine_scaleset': + self.module.deprecate("The 'azure_rm_virtualmachine_scaleset' module has been renamed to 'azure_rm_virtualmachinescaleset'", version=(2, 9)) + + # make sure options are lower case + self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent]) + + # convert elements to ints + self.zones = [int(i) for i in self.zones] if self.zones else None + + # default virtual_network_resource_group to resource_group + if not self.virtual_network_resource_group: + self.virtual_network_resource_group = self.resource_group + + changed = False + results = dict() + vmss = None + disable_ssh_password = None + subnet = None + image_reference = None + load_balancer_backend_address_pools = None + load_balancer_inbound_nat_pools = None + load_balancer = None + application_gateway = None + 
application_gateway_backend_address_pools = None + support_lb_change = True + public_ip_address_configuration = None + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if self.custom_data: + self.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data))) + + if self.state == 'present': + # Verify parameters and resolve any defaults + + if self.vm_size and not self.vm_size_is_valid(): + self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format( + self.vm_size + )) + + # if self.virtual_network_name: + # virtual_network = self.get_virtual_network(self.virtual_network_name) + + if self.ssh_public_keys: + msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \ + "each dict contains keys: path, key_data." + for key in self.ssh_public_keys: + if not isinstance(key, dict): + self.fail(msg) + if not key.get('path') or not key.get('key_data'): + self.fail(msg) + + if self.image and isinstance(self.image, dict): + if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')): + marketplace_image = self.get_marketplace_image_version() + if self.image['version'] == 'latest': + self.image['version'] = marketplace_image.name + self.log("Using image version {0}".format(self.image['version'])) + + image_reference = self.compute_models.ImageReference( + publisher=self.image['publisher'], + offer=self.image['offer'], + sku=self.image['sku'], + version=self.image['version'] + ) + elif self.image.get('name'): + custom_image = True + image_reference = self.get_custom_image_reference( + self.image.get('name'), + self.image.get('resource_group')) + elif self.image.get('id'): + try: + image_reference = self.compute_models.ImageReference(id=self.image['id']) + except Exception as exc: + self.fail("id Error: Cannot get image from the reference id - {0}".format(self.image['id'])) + else: + 
self.fail("parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]") + elif self.image and isinstance(self.image, str): + custom_image = True + image_reference = self.get_custom_image_reference(self.image) + elif self.image: + self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__)) + + disable_ssh_password = not self.ssh_password_enabled + + if self.load_balancer: + load_balancer = self.get_load_balancer(self.load_balancer) + load_balancer_backend_address_pools = ([self.compute_models.SubResource(id=resource.id) + for resource in load_balancer.backend_address_pools] + if load_balancer.backend_address_pools else None) + load_balancer_inbound_nat_pools = ([self.compute_models.SubResource(id=resource.id) + for resource in load_balancer.inbound_nat_pools] + if load_balancer.inbound_nat_pools else None) + + if self.application_gateway: + application_gateway = self.get_application_gateway(self.application_gateway) + application_gateway_backend_address_pools = ([self.compute_models.SubResource(id=resource.id) + for resource in application_gateway.backend_address_pools] + if application_gateway.backend_address_pools else None) + + if self.public_ip_per_vm: + public_ip_address_configuration = self.compute_models.VirtualMachineScaleSetPublicIPAddressConfiguration(name='instancepublicip') + + try: + self.log("Fetching virtual machine scale set {0}".format(self.name)) + vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) + self.check_provisioning_state(vmss, self.state) + vmss_dict = self.serialize_vmss(vmss) + + if self.state == 'present': + differences = [] + results = vmss_dict + current_osdisk = vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk'] + current_ephemeral = current_osdisk.get('diffDiskSettings', None) + current_properties = vmss_dict['properties']['virtualMachineProfile'] + + if self.priority 
and self.priority != current_properties.get('priority', 'None'): + self.fail('VM Priority is not updatable: requested virtual machine priority is {0}'.format(self.priority)) + if self.eviction_policy and \ + self.eviction_policy != current_properties.get('evictionPolicy', None): + self.fail('VM Eviction Policy is not updatable: requested virtual machine eviction policy is {0}'.format(self.eviction_policy)) + if self.max_price and \ + vmss_dict['properties']['virtualMachineProfile'].get('billingProfile', None) and \ + self.max_price != vmss_dict['properties']['virtualMachineProfile']['billingProfile'].get('maxPrice', None): + self.fail('VM Maximum Price is not updatable: requested virtual machine maximum price is {0}'.format(self.max_price)) + + if self.ephemeral_os_disk and current_ephemeral is None: + self.fail('Ephemeral OS disk not updatable: virtual machine scale set ephemeral OS disk is {0}'.format(self.ephemeral_os_disk)) + elif not self.ephemeral_os_disk and current_ephemeral is not None: + self.fail('Ephemeral OS disk not updatable: virtual machine scale set ephemeral OS disk is {0}'.format(self.ephemeral_os_disk)) + + if self.os_disk_size_gb and \ + self.os_disk_size_gb != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['diskSizeGB']: + self.fail('VMSS OS disk size is not updatable: requested virtual machine OS disk size is {0}'.format(self.os_disk_size_gb)) + + if self.os_disk_caching and \ + self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']: + self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name)) + differences.append('OS Disk caching') + changed = True + vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching + + if self.capacity and \ + self.capacity != vmss_dict['sku']['capacity']: + self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name)) + 
differences.append('Capacity') + changed = True + vmss_dict['sku']['capacity'] = self.capacity + + if self.data_disks and \ + len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])): + self.log('CHANGED: virtual machine scale set {0} - Data Disks'.format(self.name)) + differences.append('Data Disks') + changed = True + + if self.upgrade_policy and \ + self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']: + self.log('CHANGED: virtual machine scale set {0} - Upgrade Policy'.format(self.name)) + differences.append('Upgrade Policy') + changed = True + vmss_dict['properties']['upgradePolicy']['mode'] = self.upgrade_policy + + if image_reference and \ + image_reference.as_dict() != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference']: + self.log('CHANGED: virtual machine scale set {0} - Image'.format(self.name)) + differences.append('Image') + changed = True + vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference'] = image_reference.as_dict() + + update_tags, vmss_dict['tags'] = self.update_tags(vmss_dict.get('tags', dict())) + if update_tags: + differences.append('Tags') + changed = True + + if self.overprovision is not None and bool(self.overprovision) != bool(vmss_dict['properties'].get('overprovision')): + differences.append('overprovision') + changed = True + + if bool(self.single_placement_group) != bool(vmss_dict['properties']['singlePlacementGroup']): + differences.append('single_placement_group') + changed = True + + vmss_dict['zones'] = [int(i) for i in vmss_dict['zones']] if 'zones' in vmss_dict and vmss_dict['zones'] else None + if self.zones != vmss_dict['zones']: + self.log("CHANGED: virtual machine scale sets {0} zones".format(self.name)) + differences.append('Zones') + changed = True + vmss_dict['zones'] = self.zones + + if self.terminate_event_timeout_minutes: + timeout = self.terminate_event_timeout_minutes + if 
timeout < 5 or timeout > 15: + self.fail("terminate_event_timeout_minutes should >= 5 and <= 15") + iso_8601_format = "PT" + str(timeout) + "M" + old = vmss_dict['properties']['virtualMachineProfile'].get('scheduledEventsProfile', {}).\ + get('terminateNotificationProfile', {}).get('notBeforeTimeout', "") + if old != iso_8601_format: + differences.append('terminateNotification') + changed = True + vmss_dict['properties']['virtualMachineProfile'].setdefault('scheduledEventsProfile', {})['terminateNotificationProfile'] = { + 'notBeforeTimeout': iso_8601_format, + "enable": 'true' + } + + if self.scale_in_policy and self.scale_in_policy != vmss_dict['properties'].get('scaleInPolicy', {}).get('rules', [""])[0]: + self.log("CHANGED: virtual machine sale sets {0} scale in policy".format(self.name)) + differences.append('scaleInPolicy') + changed = True + vmss_dict['properties'].setdefault('scaleInPolicy', {})['rules'] = [self.scale_in_policy] + + nicConfigs = vmss_dict['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'] + + backend_address_pool = nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('loadBalancerBackendAddressPools', []) + backend_address_pool += nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('applicationGatewayBackendAddressPools', []) + lb_or_ag_id = None + if (len(nicConfigs) != 1 or len(backend_address_pool) != 1): + support_lb_change = False # Currently not support for the vmss contains more than one loadbalancer + self.module.warn('Updating more than one load balancer on VMSS is currently not supported') + else: + if load_balancer: + lb_or_ag_id = "{0}/".format(load_balancer.id) + elif application_gateway: + lb_or_ag_id = "{0}/".format(application_gateway.id) + + backend_address_pool_id = backend_address_pool[0].get('id') + if lb_or_ag_id is not None and (bool(lb_or_ag_id) != bool(backend_address_pool_id) or not backend_address_pool_id.startswith(lb_or_ag_id)): + 
differences.append('load_balancer') + changed = True + + if self.custom_data: + if self.custom_data != vmss_dict['properties']['virtualMachineProfile']['osProfile'].get('customData'): + differences.append('custom_data') + changed = True + vmss_dict['properties']['virtualMachineProfile']['osProfile']['customData'] = self.custom_data + + if self.orchestration_mode and self.orchestration_mode != vmss_dict['properties'].get('orchestrationMode'): + self.fail("The orchestration_mode parameter cannot be updated!") + else: + self.orchestration_mode = vmss_dict['properties'].get('orchestrationMode') + + if self.platform_fault_domain_count and self.platform_fault_domain_count != vmss_dict['properties'].get('platformFaultDomainCount'): + self.fail("The platform_fault_domain_count parameter cannot be updated!") + + if self.security_profile is not None: + update_security_profile = False + if 'securityProfile' not in vmss_dict['properties']['virtualMachineProfile'].keys(): + update_security_profile = True + differences.append('security_profile') + else: + if self.security_profile.get('encryption_at_host') is not None: + if bool(self.security_profile.get('encryption_at_host')) != \ + bool(vmss_dict['properties']['virtualMachineProfile']['securityProfile']['encryptionAtHost']): + update_security_profle = True + else: + self.security_profile['encryption_at_host'] = \ + vmss_dict['properties']['virtualMachineProfile']['securityProfile']['encryptionAtHost'] + if self.security_profile.get('security_type') is not None: + if self.security_profile.get('security_type') != \ + vmss_dict['properties']['virtualMachineProfile']['securityProfile']['securityType']: + update_security_profile = True + if self.security_profile.get('uefi_settings') is not None: + if self.security_profile['uefi_settings'].get('secure_boot_enabled') is not None: + if bool(self.security_profile['uefi_settings']['secure_boot_enabled']) != \ + 
bool(vmss_dict['properties']['virtualMachineProfile']['securityProfile']['uefiSettings']['secureBootEnabled']): + update_security_profile = True + else: + self.security_profile['uefi_settings']['secure_boot_enabled'] = \ + vmss_dict['properties']['virtualMachineProfile']['securityProfile']['uefiSettings']['secureBootEnabled'] + if self.security_profile['uefi_settings'].get('v_tpm_enabled') is not None: + if bool(self.security_profile['uefi_settings']['v_tpm_enabled']) != \ + bool(vmss_dict['properties']['virtualMachineProfile']['securityProfile']['uefiSettings']['vTpmEnabled']): + update_security_profile = True + else: + self.security_profile['uefi_settings']['v_tpm_enabled'] = \ + vmss_dict['properties']['virtualMachineProfile']['securityProfile']['uefiSettings']['vTpmEnabled'] + if update_security_profile: + changed = True + differences.append('security_profile') + + self.differences = differences + + elif self.state == 'absent': + self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name)) + results = dict() + changed = True + + except ResourceNotFoundError: + self.log('Virtual machine scale set {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['ansible_facts']['azure_vmss'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not vmss: + # Create the VMSS + if self.vm_size is None: + self.fail("vm size must be set") + + self.log("Create virtual machine scale set {0}".format(self.name)) + self.results['actions'].append('Created VMSS {0}'.format(self.name)) + + if self.os_type == 'Linux': + if disable_ssh_password and not self.ssh_public_keys: + self.fail("Parameter error: ssh_public_keys required when disabling SSH password.") + + if not self.virtual_network_name: + 
self.fail("virtual network name is required") + + if self.subnet_name: + subnet = self.get_subnet(self.virtual_network_name, self.subnet_name) + + if not self.short_hostname: + self.short_hostname = self.name + + if not image_reference: + self.fail("Parameter error: an image is required when creating a virtual machine.") + + managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type) + + if self.security_group: + nsg = self.parse_nsg() + if nsg: + self.security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id')) + + plan = None + if self.plan: + plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'), + publisher=self.plan.get('publisher'), + promotion_code=self.plan.get('promotion_code')) + + os_profile = None + if self.admin_username or self.custom_data or self.ssh_public_keys: + os_profile = self.compute_models.VirtualMachineScaleSetOSProfile( + admin_username=self.admin_username, + computer_name_prefix=self.short_hostname, + custom_data=self.custom_data + ) + + vmss_resource = self.compute_models.VirtualMachineScaleSet( + location=self.location, + overprovision=self.overprovision, + single_placement_group=self.single_placement_group, + tags=self.tags, + orchestration_mode=self.orchestration_mode, + platform_fault_domain_count=self.platform_fault_domain_count, + upgrade_policy=self.compute_models.UpgradePolicy( + mode=self.upgrade_policy + ) if self.upgrade_policy is not None else None, + sku=self.compute_models.Sku( + name=self.vm_size, + capacity=self.capacity, + tier=self.tier, + ), + plan=plan, + virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile( + os_profile=os_profile, + # pylint: disable=missing-kwoa + storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile( + os_disk=self.compute_models.VirtualMachineScaleSetOSDisk( + managed_disk=managed_disk, + 
create_option=self.compute_models.DiskCreateOptionTypes.from_image, + caching=self.os_disk_caching, + disk_size_gb=self.os_disk_size_gb, + diff_disk_settings=self.compute_models.DiffDiskSettings(option='Local') if self.ephemeral_os_disk else None, + ), + image_reference=image_reference, + ), + network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile( + network_interface_configurations=[ + self.compute_models.VirtualMachineScaleSetNetworkConfiguration( + name=self.name, + primary=True, + ip_configurations=[ + self.compute_models.VirtualMachineScaleSetIPConfiguration( + name='default', + subnet=self.compute_models.ApiEntityReference( + id=subnet.id + ), + public_ip_address_configuration=public_ip_address_configuration, + primary=True, + load_balancer_backend_address_pools=load_balancer_backend_address_pools, + load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools, + application_gateway_backend_address_pools=application_gateway_backend_address_pools + ) + ], + enable_accelerated_networking=self.enable_accelerated_networking, + network_security_group=self.security_group + ) + ], + network_api_version='2020-11-01' if self.orchestration_mode == 'Flexible' else None + ) + ), + zones=self.zones + ) + + if self.priority == 'Spot': + vmss_resource.virtual_machine_profile.priority = self.priority + vmss_resource.virtual_machine_profile.eviction_policy = self.eviction_policy + vmss_resource.virtual_machine_profile.billing_profile = self.compute_models.BillingProfile( + max_price=self.max_price + ) + + if self.scale_in_policy: + vmss_resource.scale_in_policy = self.gen_scale_in_policy() + + if self.terminate_event_timeout_minutes: + vmss_resource.virtual_machine_profile.scheduled_events_profile = self.gen_scheduled_event_profile() + + if self.admin_password: + vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password + + if self.os_type == 'Linux' and os_profile: + 
vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration( + disable_password_authentication=disable_ssh_password + ) + + if self.ssh_public_keys: + ssh_config = self.compute_models.SshConfiguration() + ssh_config.public_keys = \ + [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys] + vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config + + if self.data_disks: + data_disks = [] + + for data_disk in self.data_disks: + data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters( + storage_account_type=data_disk.get('managed_disk_type', None) + ) + + data_disk['caching'] = data_disk.get( + 'caching', + self.compute_models.CachingTypes.read_only + ) + + data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk( + lun=data_disk.get('lun', None), + caching=data_disk.get('caching', None), + create_option=self.compute_models.DiskCreateOptionTypes.empty, + disk_size_gb=data_disk.get('disk_size_gb', None), + managed_disk=data_disk_managed_disk, + )) + + vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks + + if self.plan: + try: + plan_name = self.plan.get('name') + plan_product = self.plan.get('product') + plan_publisher = self.plan.get('publisher') + term = self.marketplace_client.marketplace_agreements.get( + offer_type='virtualmachine', publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name) + term.accepted = True + self.marketplace_client.marketplace_agreements.create( + offer_type='virtualmachine', publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name, parameters=term) + except Exception as exc: + self.fail(("Error accepting terms for virtual machine {0} with plan {1}. " + + "Only service admin/account admin users can purchase images " + + "from the marketplace. 
- {2}").format(self.name, self.plan, str(exc))) + + if self.security_profile is not None: + uefi_settings_spec = None + if self.security_profile.get('uefi_settings') is not None: + uefi_settings_spec = self.compute_models.UefiSettings( + secure_boot_enabled=self.security_profile['uefi_settings'].get('secure_boot_enabled'), + v_tpm_enabled=self.security_profile['uefi_settings'].get('v_tpm_enabled'), + ) + security_profile = self.compute_models.SecurityProfile( + uefi_settings=uefi_settings_spec, + encryption_at_host=self.security_profile.get('encryption_at_host'), + security_type=self.security_profile.get('security_type'), + ) + vmss_resource.virtual_machine_profile.security_profile = security_profile + + self.log("Create virtual machine with parameters:") + self.create_or_update_vmss(vmss_resource) + + elif self.differences and len(self.differences) > 0: + self.log("Update virtual machine scale set {0}".format(self.name)) + self.results['actions'].append('Updated VMSS {0}'.format(self.name)) + + vmss_resource = self.get_vmss() + vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching + vmss_resource.sku.capacity = self.capacity + vmss_resource.orchestration_mode = self.orchestration_mode + vmss_resource.platform_fault_domain_count = self.platform_fault_domain_count + vmss_resource.overprovision = self.overprovision + vmss_resource.single_placement_group = self.single_placement_group + + if support_lb_change: + if self.load_balancer: + vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \ + .ip_configurations[0].load_balancer_backend_address_pools = load_balancer_backend_address_pools + vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \ + .ip_configurations[0].load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools + vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \ + 
.ip_configurations[0].application_gateway_backend_address_pools = None + elif self.application_gateway: + vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \ + .ip_configurations[0].application_gateway_backend_address_pools = application_gateway_backend_address_pools + vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \ + .ip_configurations[0].load_balancer_backend_address_pools = None + vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \ + .ip_configurations[0].load_balancer_inbound_nat_pools = None + + if self.data_disks is not None: + data_disks = [] + for data_disk in self.data_disks: + data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk( + lun=data_disk['lun'], + caching=data_disk['caching'], + create_option=self.compute_models.DiskCreateOptionTypes.empty, + disk_size_gb=data_disk['disk_size_gb'], + managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters( + storage_account_type=data_disk.get('managed_disk_type', None) + ), + )) + vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks + + if self.security_profile is not None: + uefi_settings_spec = None + if self.security_profile.get('uefi_settings') is not None: + uefi_settings_spec = self.compute_models.UefiSettings( + secure_boot_enabled=self.security_profile['uefi_settings'].get('secure_boot_enabled'), + v_tpm_enabled=self.security_profile['uefi_settings'].get('v_tpm_enabled'), + ) + security_profile = self.compute_models.SecurityProfile( + uefi_settings=uefi_settings_spec, + encryption_at_host=self.security_profile.get('encryption_at_host'), + security_type=self.security_profile.get('security_type'), + ) + vmss_resource.virtual_machine_profile.security_profile = security_profile + + if self.scale_in_policy: + vmss_resource.scale_in_policy = self.gen_scale_in_policy() + + if self.terminate_event_timeout_minutes: + 
vmss_resource.virtual_machine_profile.scheduled_events_profile = self.gen_scheduled_event_profile() + + if image_reference is not None: + vmss_resource.virtual_machine_profile.storage_profile.image_reference = image_reference + + self.log("Update virtual machine with parameters:") + self.create_or_update_vmss(vmss_resource) + + self.results['ansible_facts']['azure_vmss'] = self.serialize_vmss(self.get_vmss()) + + elif self.state == 'absent': + # delete the VM + self.log("Delete virtual machine scale set {0}".format(self.name)) + self.results['ansible_facts']['azure_vmss'] = None + self.delete_vmss(vmss) + + # until we sort out how we want to do this globally + del self.results['actions'] + + return self.results + + def get_vmss(self): + ''' + Get the VMSS + + :return: VirtualMachineScaleSet object + ''' + try: + vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name) + return vmss + except ResourceNotFoundError as exc: + self.fail("Error getting virtual machine scale set {0} - {1}".format(self.name, str(exc))) + + def get_virtual_network(self, name): + try: + vnet = self.network_client.virtual_networks.get(self.virtual_network_resource_group, name) + return vnet + except ResourceNotFoundError as exc: + self.fail("Error fetching virtual network {0} - {1}".format(name, str(exc))) + + def get_subnet(self, vnet_name, subnet_name): + self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name)) + try: + subnet = self.network_client.subnets.get(self.virtual_network_resource_group, vnet_name, subnet_name) + except CloudError as exc: + self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format( + subnet_name, + vnet_name, + str(exc))) + return subnet + + def get_load_balancer(self, id): + id_dict = parse_resource_id(id) + try: + return self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name')) + except ResourceNotFoundError as exc: + 
self.fail("Error fetching load balancer {0} - {1}".format(id, str(exc))) + + def get_application_gateway(self, id): + id_dict = parse_resource_id(id) + try: + return self.network_client.application_gateways.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name')) + except ResourceNotFoundError as exc: + self.fail("Error fetching application_gateway {0} - {1}".format(id, str(exc))) + + def serialize_vmss(self, vmss): + ''' + Convert a VirtualMachineScaleSet object to dict. + + :param vm: VirtualMachineScaleSet object + :return: dict + ''' + + result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) + result['id'] = vmss.id + result['name'] = vmss.name + result['type'] = vmss.type + result['location'] = vmss.location + result['tags'] = vmss.tags + + return result + + def delete_vmss(self, vmss): + self.log("Deleting virtual machine scale set {0}".format(self.name)) + self.results['actions'].append("Deleted virtual machine scale set {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machine_scale_sets.begin_delete(self.resource_group, self.name) + # wait for the poller to finish + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting virtual machine scale set {0} - {1}".format(self.name, str(exc))) + + return True + + def get_marketplace_image_version(self): + try: + versions = self.compute_client.virtual_machine_images.list(self.location, + self.image['publisher'], + self.image['offer'], + self.image['sku']) + except ResourceNotFoundError as exc: + self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'], + self.image['offer'], + self.image['sku'], + str(exc))) + if versions and len(versions) > 0: + if self.image['version'] == 'latest': + return versions[len(versions) - 1] + for version in versions: + if version.name == self.image['version']: + return version + + self.fail("Error could not find image {0} {1} {2} 
{3}".format(self.image['publisher'], + self.image['offer'], + self.image['sku'], + self.image['version'])) + + def get_custom_image_reference(self, name, resource_group=None): + try: + if resource_group: + vm_images = self.compute_client.images.list_by_resource_group(resource_group) + else: + vm_images = self.compute_client.images.list() + except ResourceNotFoundError as exc: + self.fail("Error fetching custom images from subscription - {0}".format(str(exc))) + + for vm_image in vm_images: + if vm_image.name == name: + self.log("Using custom image id {0}".format(vm_image.id)) + return self.compute_models.ImageReference(id=vm_image.id) + + self.fail("Error could not find image with name {0}".format(name)) + + def create_or_update_vmss(self, params): + try: + poller = self.compute_client.virtual_machine_scale_sets.begin_create_or_update(self.resource_group, self.name, params) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc))) + + def vm_size_is_valid(self): + ''' + Validate self.vm_size against the list of virtual machine sizes available for the account and location. 
+ + :return: boolean + ''' + try: + sizes = self.compute_client.virtual_machine_sizes.list(self.location) + except ResourceNotFoundError as exc: + self.fail("Error retrieving available machine sizes - {0}".format(str(exc))) + for size in sizes: + if size.name == self.vm_size: + return True + return False + + def parse_nsg(self): + nsg = self.security_group + resource_group = self.resource_group + if isinstance(self.security_group, dict): + nsg = self.security_group.get('name') + resource_group = self.security_group.get('resource_group', self.resource_group) + id = format_resource_id(val=nsg, + subscription_id=self.subscription_id, + namespace='Microsoft.Network', + types='networkSecurityGroups', + resource_group=resource_group) + name = azure_id_to_dict(id).get('name') + return dict(id=id, name=name) + + def gen_scheduled_event_profile(self): + if self.terminate_event_timeout_minutes is None: + return None + + scheduledEventProfile = self.compute_models.ScheduledEventsProfile() + terminationProfile = self.compute_models.TerminateNotificationProfile() + terminationProfile.not_before_timeout = "PT" + str(self.terminate_event_timeout_minutes) + "M" + terminationProfile.enable = True + scheduledEventProfile.terminate_notification_profile = terminationProfile + return scheduledEventProfile + + def gen_scale_in_policy(self): + if self.scale_in_policy is None: + return None + + return self.compute_models.ScaleInPolicy(rules=[self.scale_in_policy]) + + +def main(): + AzureRMVirtualMachineScaleSet() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset_info.py new file mode 100644 index 000000000..a2154d017 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescaleset_info.py @@ -0,0 +1,449 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# 
Copyright: (c) 2017, Sertac Ozercan +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachinescaleset_info + +version_added: "0.1.2" + +short_description: Get Virtual Machine Scale Set facts + +description: + - Get facts for a virtual machine scale set. + - Note that this module was called M(azure.azcollection.azure_rm_virtualmachine_scaleset_facts) before Ansible 2.8. The usage did not change. + +options: + name: + description: + - Limit results to a specific virtual machine scale set. + resource_group: + description: + - The resource group to search for the desired virtual machine scale set. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + format: + description: + - Format of the data returned. + - If C(raw) is selected information will be returned in raw format from Azure Python SDK. + - If C(curated) is selected the structure will be identical to input parameters of M(azure.azcollection.azure_rm_virtualmachinescaleset) module. + - In Ansible 2.5 and lower facts are always returned in raw format. + - Please note that this option will be deprecated in 2.10 when curated format will become the only supported format. 
+ default: 'raw' + choices: + - 'curated' + - 'raw' + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Sertac Ozercan (@sozercan) +''' + +EXAMPLES = ''' + - name: Get facts for a virtual machine scale set + azure_rm_virtualmachinescaleset_info: + resource_group: myResourceGroup + name: testvmss001 + format: curated + + - name: Get facts for all virtual networks + azure_rm_virtualmachinescaleset_info: + resource_group: myResourceGroup + + - name: Get facts by tags + azure_rm_virtualmachinescaleset_info: + resource_group: myResourceGroup + tags: + - testing +''' + +RETURN = ''' +vmss: + description: + - List of virtual machine scale sets. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/scalesets/myscaleset + admin_username: + description: + - Admin username used to access the host after it is created. + returned: always + type: str + sample: adminuser + capacity: + description: + - Capacity of VMSS. + returned: always + type: int + sample: 2 + data_disks: + description: + - List of attached data disks. + returned: always + type: complex + contains: + caching: + description: + - Type of data disk caching. + returned: always + type: str + sample: ReadOnly + disk_size_gb: + description: + - The initial disk size in GB for blank data disks. + returned: always + type: int + sample: 64 + lun: + description: + - The logical unit number for data disk. + returned: always + type: int + sample: 0 + managed_disk_type: + description: + - Managed data disk type. + returned: always + type: str + sample: Standard_LRS + image: + description: + - Image specification. + returned: always + type: complex + contains: + offer: + description: + - The offer of the platform image or marketplace image used to create the virtual machine. 
+ returned: always + type: str + sample: RHEL + publisher: + description: + - Publisher name. + returned: always + type: str + sample: RedHat + sku: + description: + - SKU name. + returned: always + type: str + sample: 7-RAW + version: + description: + - Image version. + returned: always + type: str + sample: 7.5.2018050901 + load_balancer: + description: + - Load balancer name. + returned: always + type: str + sample: testlb + location: + description: + - Resource location. + type: str + returned: always + sample: japaneast + managed_disk_type: + description: + - Managed data disk type. + type: str + returned: always + sample: Standard_LRS + name: + description: + - Resource name. + returned: always + type: str + sample: myvmss + os_disk_caching: + description: + - Type of OS disk caching. + type: str + returned: always + sample: ReadOnly + os_type: + description: + - Base type of operating system. + type: str + returned: always + sample: Linux + overprovision: + description: + - Specifies whether the Virtual Machine Scale Set should be overprovisioned. + type: bool + sample: true + resource_group: + description: + - Resource group. + type: str + returned: always + sample: myResourceGroup + ssh_password_enabled: + description: + - Is SSH password authentication enabled. Valid only for Linux. + type: bool + returned: always + sample: true + subnet_name: + description: + - Subnet name. + type: str + returned: always + sample: testsubnet + tier: + description: + - SKU Tier. + type: str + returned: always + sample: Basic + upgrade_policy: + description: + - Upgrade policy. + type: str + returned: always + sample: Manual + virtual_network_name: + description: + - Associated virtual network name. + type: str + returned: always + sample: testvn + vm_size: + description: + - Virtual machine size. + type: str + returned: always + sample: Standard_D4 + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. 
class AzureRMVirtualMachineScaleSetInfo(AzureRMModuleBase):
    """Gather facts for Azure virtual machine scale sets (VMSS).

    Supports filtering by name (which requires resource_group) and by tags,
    and two output formats: 'raw' (serialized Azure SDK objects) and
    'curated' (a flattened summary produced by _curate_item below).
    """

    def __init__(self):
        self.module_args = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list', elements='str'),
            format=dict(
                type='str',
                choices=['curated',
                         'raw'],
                default='raw'
            )
        )

        self.results = dict(
            changed=False,
        )

        self.name = None
        self.resource_group = None
        self.format = None
        self.tags = None

        super(AzureRMVirtualMachineScaleSetInfo, self).__init__(
            derived_arg_spec=self.module_args,
            supports_check_mode=True,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):
        """Entry point: collect VMSS facts and populate self.results."""
        is_old_facts = self.module._name == 'azure_rm_virtualmachinescaleset_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_virtualmachinescaleset_facts' module has been renamed to 'azure_rm_virtualmachinescaleset_info'",
                                  version=(2.9, ))

        for key in self.module_args:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            result = self.get_item()
        else:
            result = self.list_items()

        if self.format == 'curated':
            result = [self._curate_item(vmss) for vmss in result]

        if is_old_facts:
            self.results['ansible_facts'] = {
                'azure_vmss': result
            }
            if self.format == 'curated':
                # proper result format we want to support in the future
                # dropping 'ansible_facts' and shorter name 'vmss'
                self.results['vmss'] = result
        else:
            self.results['vmss'] = result

        return self.results

    def _curate_item(self, vmss):
        """Flatten one raw serialized VMSS dict into the 'curated' format.

        Best-effort extraction: network and SSH details that cannot be read
        from the raw payload are left as None/False and the failure is logged.
        """
        subnet_id = None
        subnet_name = None
        load_balancer_name = None
        virtual_network_name = None
        ssh_password_enabled = False

        try:
            subnet_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
                         ['properties']['ipConfigurations'][0]['properties']['subnet']['id'])
            subnet_name = re.sub('.*subnets\\/', '', subnet_id)
        except Exception:
            self.log('Could not extract subnet name')

        try:
            backend_address_pool_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
                                       ['properties']['ipConfigurations'][0]['properties']['loadBalancerBackendAddressPools'][0]['id'])
            load_balancer_name = re.sub('\\/backendAddressPools.*', '', re.sub('.*loadBalancers\\/', '', backend_address_pool_id))
            # If the subnet lookup above failed, subnet_id is still None and
            # re.sub raises, leaving virtual_network_name at None (as before).
            virtual_network_name = re.sub('.*virtualNetworks\\/', '', re.sub('\\/subnets.*', '', subnet_id))
        except Exception:
            self.log('Could not extract load balancer / virtual network name')

        try:
            ssh_password_enabled = (not vmss['properties']['virtualMachineProfile']['osProfile']
                                    ['linuxConfiguration']['disablePasswordAuthentication'])
        except Exception:
            self.log('Could not extract SSH password enabled')

        # Rewrite each data-disk entry into the short snake_case shape.
        data_disks = [
            {
                'lun': disk['lun'],
                'disk_size_gb': disk['diskSizeGB'],
                'managed_disk_type': disk['managedDisk']['storageAccountType'],
                'caching': disk['caching']
            }
            for disk in vmss['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])
        ]

        return {
            'id': vmss['id'],
            'resource_group': self.resource_group,
            'name': vmss['name'],
            'state': 'present',
            'location': vmss['location'],
            'vm_size': vmss['sku']['name'],
            'capacity': vmss['sku']['capacity'],
            'tier': vmss['sku']['tier'],
            'upgrade_policy': vmss['properties'].get('upgradePolicy'),
            'orchestrationMode': vmss['properties'].get('orchestrationMode'),
            'platformFaultDomainCount': vmss['properties'].get('platformFaultDomainCount'),
            'admin_username': vmss['properties']['virtualMachineProfile']['osProfile']['adminUsername'],
            'admin_password': vmss['properties']['virtualMachineProfile']['osProfile'].get('adminPassword'),
            'ssh_password_enabled': ssh_password_enabled,
            'image': vmss['properties']['virtualMachineProfile']['storageProfile']['imageReference'],
            'os_disk_caching': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'],
            'os_type': 'Linux' if (vmss['properties']['virtualMachineProfile']['osProfile'].get('linuxConfiguration') is not None) else 'Windows',
            'overprovision': vmss['properties'].get('overprovision'),
            'managed_disk_type': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['managedDisk']['storageAccountType'],
            'data_disks': data_disks,
            'virtual_network_name': virtual_network_name,
            'subnet_name': subnet_name,
            'load_balancer': load_balancer_name,
            'tags': vmss.get('tags')
        }

    def get_item(self):
        """Get a single virtual machine scale set (or [] if not found)."""
        self.log('Get properties for {0}'.format(self.name))

        item = None
        results = []

        try:
            item = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
        except ResourceNotFoundError:
            # Not found is not an error for a facts module; return [].
            pass

        if item and self.has_tags(item.tags, self.tags):
            results = [self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)]

        return results

    def list_items(self):
        """Get all virtual machine scale sets in the resource group."""
        self.log('List all virtual machine scale sets')

        try:
            response = self.compute_client.virtual_machine_scale_sets.list(self.resource_group)
        except ResourceNotFoundError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES))

        return results


def main():
    """Main module execution code path"""

    AzureRMVirtualMachineScaleSetInfo()


if __name__ == '__main__':
    main()
+ type: + description: + - The type of the extension handler. + type_handler_version: + description: + - The type version of the extension handler. + settings: + description: + - A dictionary containing extension settings. + - Settings depend on extension type. + - Refer to U(https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/overview) for more information. + protected_settings: + description: + - A dictionary containing protected extension settings. + - Settings depend on extension type. + - Refer to U(https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/overview) for more information. + auto_upgrade_minor_version: + description: + - Whether the extension handler should be automatically upgraded across minor versions. + type: bool + state: + description: + - Assert the state of the extension. + - Use C(present) to create or update an extension and C(absent) to delete it. + default: present + choices: + - absent + - present + + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) +''' + +EXAMPLES = ''' + - name: Install VMSS Extension + azure_rm_virtualmachinescalesetextension: + name: myvmssextension + location: eastus + resource_group: myResourceGroup + vmss_name: myvm + publisher: Microsoft.Azure.Extensions + type: CustomScript + type_handler_version: 2.0 + settings: '{"commandToExecute": "hostname"}' + auto_upgrade_minor_version: true + + - name: Remove VMSS Extension + azure_rm_virtualmachinescalesetextension: + name: myvmssextension + location: eastus + resource_group: myResourceGroup + vmss_name: myvm + state: absent +''' + +RETURN = ''' +id: + description: + - VMSS extension resource ID. 
class AzureRMVMSSExtension(AzureRMModuleBase):
    """Create, update or delete an extension on a virtual machine scale set."""

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            vmss_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            publisher=dict(
                type='str'
            ),
            type=dict(
                type='str'
            ),
            type_handler_version=dict(
                type='str'
            ),
            auto_upgrade_minor_version=dict(
                type='bool'
            ),
            settings=dict(
                type='dict'
            ),
            protected_settings=dict(
                type='dict'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            ),
        )

        self.resource_group = None
        self.name = None
        self.location = None
        self.publisher = None
        self.type = None
        self.type_handler_version = None
        self.auto_upgrade_minor_version = None
        self.settings = None
        self.protected_settings = None
        self.state = None

        # publisher/type/type_handler_version are mandatory when creating.
        required_if = [
            ('state', 'present', [
                'publisher', 'type', 'type_handler_version'])
        ]

        self.results = dict(changed=False, state=dict())

        super(AzureRMVMSSExtension, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                   supports_tags=False,
                                                   required_if=required_if)

    def exec_module(self, **kwargs):
        """Entry point: converge the extension to the requested state."""
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])

        response = None
        to_be_updated = False

        # Default the location to the resource group's location.
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        if self.state == 'present':
            response = self.get_vmssextension()
            if not response:
                to_be_updated = True
            else:
                # Compare each updatable property against the live resource;
                # unspecified options inherit the current value so the
                # create_or_update call does not wipe them.
                if self.settings is not None:
                    if response.get('settings') != self.settings:
                        response['settings'] = self.settings
                        to_be_updated = True
                else:
                    self.settings = response.get('settings')

                if self.protected_settings is not None:
                    if response.get('protected_settings') != self.protected_settings:
                        response['protected_settings'] = self.protected_settings
                        to_be_updated = True
                else:
                    self.protected_settings = response.get('protected_settings')

                # publisher and type are immutable on an existing extension;
                # warn instead of attempting an update.
                if response['publisher'] != self.publisher:
                    self.publisher = response['publisher']
                    self.module.warn("Property 'publisher' cannot be changed")

                if response['type'] != self.type:
                    self.type = response['type']
                    self.module.warn("Property 'type' cannot be changed")

                if response['type_handler_version'] != self.type_handler_version:
                    response['type_handler_version'] = self.type_handler_version
                    to_be_updated = True

                if self.auto_upgrade_minor_version is not None:
                    if response['auto_upgrade_minor_version'] != self.auto_upgrade_minor_version:
                        response['auto_upgrade_minor_version'] = self.auto_upgrade_minor_version
                        to_be_updated = True
                else:
                    self.auto_upgrade_minor_version = response['auto_upgrade_minor_version']

            if to_be_updated:
                if not self.check_mode:
                    response = self.create_or_update_vmssextension()
                self.results['changed'] = True
        elif self.state == 'absent':
            if not self.check_mode:
                self.delete_vmssextension()
            self.results['changed'] = True

        if response:
            self.results['id'] = response.get('id')

        return self.results

    def create_or_update_vmssextension(self):
        """Create or update the extension; returns the resulting dict."""
        self.log("Creating VMSS extension {0}".format(self.name))
        try:
            params = self.compute_models.VirtualMachineScaleSetExtension(
                location=self.location,
                publisher=self.publisher,
                type_properties_type=self.type,
                type_handler_version=self.type_handler_version,
                auto_upgrade_minor_version=self.auto_upgrade_minor_version,
                settings=self.settings,
                protected_settings=self.protected_settings
            )
            poller = self.compute_client.virtual_machine_scale_set_extensions.begin_create_or_update(resource_group_name=self.resource_group,
                                                                                                     vm_scale_set_name=self.vmss_name,
                                                                                                     vmss_extension_name=self.name,
                                                                                                     extension_parameters=params)
            response = self.get_poller_result(poller)
            return response.as_dict()

        except Exception as e:
            self.log('Error attempting to create the VMSS extension.')
            self.fail("Error creating the VMSS extension: {0}".format(str(e)))

    def delete_vmssextension(self):
        """Delete the extension; fails the module on any error."""
        self.log("Deleting vmextension {0}".format(self.name))
        try:
            poller = self.compute_client.virtual_machine_scale_set_extensions.begin_delete(resource_group_name=self.resource_group,
                                                                                           vm_scale_set_name=self.vmss_name,
                                                                                           vmss_extension_name=self.name)
            self.get_poller_result(poller)
        except Exception as e:
            self.log('Error attempting to delete the vmextension.')
            self.fail("Error deleting the vmextension: {0}".format(str(e)))

    def get_vmssextension(self):
        """Return the existing extension as a dict, or None if absent.

        (Previously returned False on not-found; None is the conventional
        falsy sentinel and both call sites only test truthiness.)
        """
        self.log("Checking if the VMSS extension {0} is present".format(self.name))
        try:
            response = self.compute_client.virtual_machine_scale_set_extensions.get(self.resource_group, self.vmss_name, self.name)
            return response.as_dict()
        except ResourceNotFoundError:
            self.log('Did not find VMSS extension')
            return None


def main():
    AzureRMVMSSExtension()


if __name__ == '__main__':
    main()
https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachinescalesetextension_info +version_added: "0.1.2" +short_description: Get Azure Virtual Machine Scale Set Extension facts +description: + - Get facts of Azure Virtual Machine Scale Set Extension. + +options: + resource_group: + description: + - The name of the resource group. + required: True + vmss_name: + description: + - The name of VMSS containing the extension. + required: True + name: + description: + - The name of the virtual machine extension. + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: Get information on specific Virtual Machine Scale Set Extension + azure_rm_virtualmachineextension_info: + resource_group: myResourceGroup + vmss_name: myvmss + name: myextension + + - name: List installed Virtual Machine Scale Set Extensions + azure_rm_virtualmachineextension_info: + resource_group: myrg + vmss_name: myvmss +''' + +RETURN = ''' +extensions: + description: + - A list of dictionaries containing facts for Virtual Machine Extension. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/ + myvmss/extensions/myextension" + resource_group: + description: + - Resource group name. + returned: always + type: str + sample: myrg + vmss_name: + description: + - Virtual machine scale set name. + returned: always + type: str + sample: myvmss + name: + description: + - Virtual machine extension name. + returned: always + type: str + sample: myextension + publisher: + description: + - Extension publisher. 
class AzureRMVirtualMachineScaleSetExtensionInfo(AzureRMModuleBase):
    """Facts module: fetch one extension of a VMSS, or list them all."""

    def __init__(self):
        # User inputs: mandatory resource group and scale set, optional
        # extension name (omitting it lists every installed extension).
        arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            vmss_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        self.module_arg_spec = arg_spec
        # Facts modules never change state.
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.vmss_name = None
        self.name = None
        super(AzureRMVirtualMachineScaleSetExtensionInfo, self).__init__(arg_spec, supports_check_mode=True, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: dispatch to a single lookup or a full listing."""
        if self.module._name == 'azure_rm_virtualmachinescalesetextension_facts':
            self.module.deprecate("The 'azure_rm_virtualmachinescalesetextension_facts' module has been renamed to" +
                                  " 'azure_rm_virtualmachinescalesetextension_info'",
                                  version=(2.9, ))

        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])

        facts = self.get_extensions() if self.name is not None else self.list_extensions()
        self.results['extensions'] = facts
        return self.results

    def get_extensions(self):
        """Return a one-element list for the named extension, or []."""
        try:
            found = self.compute_client.virtual_machine_scale_set_extensions.get(resource_group_name=self.resource_group,
                                                                                 vm_scale_set_name=self.vmss_name,
                                                                                 vmss_extension_name=self.name)
        except ResourceNotFoundError:
            self.log('Could not get facts for Virtual Machine Extension.')
            return []

        self.log("Response : {0}".format(found))
        return [self.format_response(found)] if found else []

    def list_extensions(self):
        """Return facts for every extension installed on the scale set."""
        try:
            paged = self.compute_client.virtual_machine_scale_set_extensions.list(resource_group_name=self.resource_group,
                                                                                  vm_scale_set_name=self.vmss_name)
        except ResourceNotFoundError:
            self.log('Could not get facts for Virtual Machine Extension.')
            return []

        self.log("Response : {0}".format(paged))
        if paged is None:
            return []
        return [self.format_response(entry) for entry in paged]

    def format_response(self, item):
        """Project an SDK extension object onto the documented return shape."""
        id_template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachineScaleSets/{2}/extensions/{3}"
        raw = item.as_dict()
        return {
            'id': id_template.format(self.subscription_id, self.resource_group, self.vmss_name, raw.get('name')),
            'resource_group': self.resource_group,
            'vmss_name': self.vmss_name,
            'name': raw.get('name'),
            'publisher': raw.get('publisher'),
            'type': raw.get('type'),
            'settings': raw.get('settings'),
            'auto_upgrade_minor_version': raw.get('auto_upgrade_minor_version'),
            'provisioning_state': raw.get('provisioning_state')
        }


def main():
    AzureRMVirtualMachineScaleSetExtensionInfo()


if __name__ == '__main__':
    main()
000000000..54ed23b86 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py @@ -0,0 +1,319 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Zim Kalinowski, (@zikalino) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachinescalesetinstance +version_added: "0.1.2" +short_description: Get Azure Virtual Machine Scale Set Instance facts +description: + - Get facts of Azure Virtual Machine Scale Set VMs. + +options: + resource_group: + description: + - The name of the resource group. + required: True + vmss_name: + description: + - The name of the VM scale set. + required: True + instance_id: + description: + - The instance ID of the virtual machine. + required: True + latest_model: + type: bool + description: + - Set to C(yes) to upgrade to the latest model. + power_state: + description: + - Use this option to change power state of the instance. + choices: + - 'running' + - 'stopped' + - 'deallocated' + protect_from_scale_in: + type: bool + description: + - turn on/off instance protection from scale in + protect_from_scale_set_actions: + type: bool + description: + - tun on/off instance protection from scale set actions + state: + description: + - State of the VMSS instance. Use C(present) to update an instance and C(absent) to delete an instance. 
class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
    """Manage single VM instances of an Azure virtual machine scale set.

    Supports deleting an instance, applying the latest scale-set model,
    changing power state, and toggling scale-in / scale-set-action
    protection.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            vmss_name=dict(
                type='str',
                required=True
            ),
            instance_id=dict(
                type='str'
            ),
            latest_model=dict(
                type='bool'
            ),
            power_state=dict(
                type='str',
                choices=['running', 'stopped', 'deallocated']
            ),
            protect_from_scale_in=dict(
                type='bool'
            ),
            protect_from_scale_set_actions=dict(
                type='bool'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.vmss_name = None
        self.instance_id = None
        self.latest_model = None
        self.power_state = None
        self.state = None
        self.protect_from_scale_in = None
        self.protect_from_scale_set_actions = None
        super(AzureRMVirtualMachineScaleSetInstance, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: apply the requested state to the matched instance(s)."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        # Pinned API version for the scale-set VM operations.
        self.mgmt_client = self.get_mgmt_svc_client(ComputeManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager,
                                                    is_track2=True,
                                                    api_version='2021-04-01')

        instances = self.get()

        if self.state == 'absent':
            for item in instances:
                if not self.check_mode:
                    self.delete(item['instance_id'])
                self.results['changed'] = True
            # Report an empty instance list after deletion; the listing in the
            # else-branch below must not run here, or it would re-report the
            # ids of the instances that were just deleted.
            self.results['instances'] = []
        else:
            if self.latest_model is not None:
                for item in instances:
                    if not item.get('latest_model', None):
                        if not self.check_mode:
                            self.apply_latest_model([item['instance_id']])
                        item['latest_model'] = True
                        self.results['changed'] = True

            if self.power_state is not None:
                for item in instances:
                    if self.power_state == 'stopped' and item['power_state'] not in ['stopped', 'stopping']:
                        if not self.check_mode:
                            self.stop(item['instance_id'])
                        self.results['changed'] = True
                    elif self.power_state == 'deallocated' and item['power_state'] not in ['deallocated']:
                        if not self.check_mode:
                            self.deallocate(item['instance_id'])
                        self.results['changed'] = True
                    elif self.power_state == 'running' and item['power_state'] not in ['running']:
                        if not self.check_mode:
                            self.start(item['instance_id'])
                        self.results['changed'] = True

            if self.protect_from_scale_in is not None or self.protect_from_scale_set_actions is not None:
                for item in instances:
                    protection_policy = item['protection_policy']
                    # NOTE(review): if only one of the two protect_* options is
                    # supplied, the other compares against None and the policy
                    # always looks changed — confirm before tightening this.
                    if protection_policy is None or self.protect_from_scale_in != protection_policy['protect_from_scale_in'] or \
                            self.protect_from_scale_set_actions != protection_policy['protect_from_scale_set_actions']:
                        if not self.check_mode:
                            # Use the matched instance's own id (was self.instance_id)
                            # for consistency with the other branches above.
                            self.update_protection_policy(item['instance_id'], self.protect_from_scale_in, self.protect_from_scale_set_actions)
                        self.results['changed'] = True

            self.results['instances'] = [{'id': item['id']} for item in instances]

        return self.results

    def get(self):
        """Fetch the targeted instance; returns [] when it does not exist."""
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
                                                                          vm_scale_set_name=self.vmss_name,
                                                                          instance_id=self.instance_id)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError:
            self.log('Could not get facts for Virtual Machine Scale Set VM.')

        if response:
            results.append(self.format_response(response))

        return results

    def apply_latest_model(self, instance_ids):
        """Upgrade the given list of instance ids to the latest VMSS model."""
        try:
            # NOTE(review): this goes through self.compute_client while every
            # other operation uses self.mgmt_client — confirm intentional.
            poller = self.compute_client.virtual_machine_scale_sets.begin_update_instances(resource_group_name=self.resource_group,
                                                                                           vm_scale_set_name=self.vmss_name,
                                                                                           vm_instance_i_ds={'instance_ids': instance_ids})
            self.get_poller_result(poller)
        except Exception as exc:
            self.log("Error applying latest model {0} - {1}".format(self.vmss_name, str(exc)))
            self.fail("Error applying latest model {0} - {1}".format(self.vmss_name, str(exc)))

    def delete(self, instance_id):
        """Delete one instance from the scale set."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.begin_delete(resource_group_name=self.resource_group,
                                                                        vm_scale_set_name=self.vmss_name,
                                                                        instance_id=instance_id)
        except Exception:
            self.log('Could not delete instance of Virtual Machine Scale Set VM.')
            self.fail('Could not delete instance of Virtual Machine Scale Set VM.')

    def start(self, instance_id):
        """Start a stopped/deallocated instance."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.begin_start(resource_group_name=self.resource_group,
                                                                       vm_scale_set_name=self.vmss_name,
                                                                       instance_id=instance_id)
        except Exception:
            self.log('Could not start instance of Virtual Machine Scale Set VM.')
            self.fail('Could not start instance of Virtual Machine Scale Set VM.')

    def stop(self, instance_id):
        """Power off an instance (compute resources remain allocated)."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.begin_power_off(resource_group_name=self.resource_group,
                                                                           vm_scale_set_name=self.vmss_name,
                                                                           instance_id=instance_id)
        except Exception:
            self.log('Could not stop instance of Virtual Machine Scale Set VM.')
            self.fail('Could not stop instance of Virtual Machine Scale Set VM.')

    def deallocate(self, instance_id):
        """Deallocate an instance (releases its compute resources)."""
        try:
            self.mgmt_client.virtual_machine_scale_set_vms.begin_deallocate(resource_group_name=self.resource_group,
                                                                            vm_scale_set_name=self.vmss_name,
                                                                            instance_id=instance_id)
        except Exception:
            self.log('Could not deallocate instance of Virtual Machine Scale Set VM.')
            self.fail('Could not deallocate instance of Virtual Machine Scale Set VM.')

    def update_protection_policy(self, instance_id, protect_from_scale_in, protect_from_scale_set_actions):
        """Patch the instance's protection policy with the supplied flags.

        Only flags that are not None are sent; the instance object is
        re-fetched so the update carries the rest of its current state.
        """
        try:
            d = {}
            if protect_from_scale_in is not None:
                d['protect_from_scale_in'] = protect_from_scale_in
            if protect_from_scale_set_actions is not None:
                d['protect_from_scale_set_actions'] = protect_from_scale_set_actions
            protection_policy = self.compute_models.VirtualMachineScaleSetVMProtectionPolicy(**d)
            instance = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
                                                                          vm_scale_set_name=self.vmss_name,
                                                                          instance_id=instance_id)
            instance.protection_policy = protection_policy
            poller = self.mgmt_client.virtual_machine_scale_set_vms.begin_update(resource_group_name=self.resource_group,
                                                                                 vm_scale_set_name=self.vmss_name,
                                                                                 instance_id=instance_id,
                                                                                 parameters=instance)
            self.get_poller_result(poller)
        except Exception:
            self.log('Could not update instance protection policy.')
            self.fail('Could not update instance protection policy.')

    def format_response(self, item):
        """Flatten an SDK VM object into the reduced dict used internally."""
        d = item.as_dict()
        iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
                                                                              vm_scale_set_name=self.vmss_name,
                                                                              instance_id=d.get('instance_id', None)).as_dict()
        # The power state is encoded as a 'PowerState/<state>' status code.
        power_state = ""
        for status in iv['statuses']:
            code = status['code'].split('/')
            if code[0] == 'PowerState':
                power_state = code[1]
                break
        return {
            'id': d.get('id'),
            'tags': d.get('tags'),
            'instance_id': d.get('instance_id'),
            'latest_model': d.get('latest_model_applied'),
            'power_state': power_state,
            'protection_policy': d.get('protection_policy')
        }


def main():
    AzureRMVirtualMachineScaleSetInstance()


if __name__ == '__main__':
    main()
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Zim Kalinowski (@zikalino) + +''' + +EXAMPLES = ''' + - name: List VM instances in Virtual Machine ScaleSet + azure_rm_virtualmachinescalesetinstance_info: + resource_group: myResourceGroup + vmss_name: myVMSS +''' + +RETURN = ''' +instances: + description: + - A list of dictionaries containing facts for Virtual Machine Scale Set VM. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/my + VMSS/virtualMachines/2" + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'tag1': 'abc' } + instance_id: + description: + - Virtual Machine instance ID. + returned: always + type: str + sample: 0 + name: + description: + - Virtual Machine name. + returned: always + type: str + sample: myVMSS_2 + latest_model: + description: + - Whether applied latest model. + returned: always + type: bool + sample: True + provisioning_state: + description: + - Provisioning state of the Virtual Machine. + returned: always + type: str + sample: Succeeded + power_state: + description: + - Provisioning state of the Virtual Machine's power. 
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

try:
    from azure.mgmt.compute import ComputeManagementClient
    from msrest.serialization import Model
    from azure.core.exceptions import ResourceNotFoundError
except ImportError:
    # This is handled in azure_rm_common
    pass


class AzureRMVirtualMachineScaleSetVMInfo(AzureRMModuleBase):
    """Info module: gather facts for VM instances in a virtual machine scale set."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            vmss_name=dict(
                type='str',
                required=True
            ),
            instance_id=dict(
                type='str'
            ),
            tags=dict(
                type='list',
                elements='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.vmss_name = None
        self.instance_id = None
        self.tags = None
        super(AzureRMVirtualMachineScaleSetVMInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: fetch one instance (instance_id given) or list all instances."""
        is_old_facts = self.module._name == 'azure_rm_virtualmachinescalesetinstance_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_virtualmachinescalesetinstance_facts' module has been renamed to" +
                                  " 'azure_rm_virtualmachinescalesetinstance_info'",
                                  version=(2.9, ))

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(ComputeManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager,
                                                    is_track2=True,
                                                    api_version='2021-04-01')

        if self.instance_id is None:
            self.results['instances'] = self.list()
        else:
            self.results['instances'] = self.get()
        return self.results

    def get(self):
        """Return a single-element list for the requested instance, or [] if absent.

        A missing instance is not an error for a facts module; the
        ResourceNotFoundError is logged and an empty list is returned.
        """
        response = None
        results = []
        try:
            response = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
                                                                          vm_scale_set_name=self.vmss_name,
                                                                          instance_id=self.instance_id)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            self.log('Could not get facts for Virtual Machine Scale Set VM.')

        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_response(response))

        return results

    def list(self):
        """Return facts for all instances of the scale set matching the tag filter."""
        items = None
        try:
            items = self.mgmt_client.virtual_machine_scale_set_vms.list(resource_group_name=self.resource_group,
                                                                        virtual_machine_scale_set_name=self.vmss_name)
            self.log("Response : {0}".format(items)) 
        except ResourceNotFoundError as e:
            self.log('Could not get facts for Virtual Machine ScaleSet VM.')

        results = []
        # bug fix: when the list call failed, items is None and iterating it
        # would raise TypeError; return an empty result set instead
        if items is not None:
            for item in items:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_response(item))
        return results

    def format_response(self, item):
        """Flatten a VirtualMachineScaleSetVM object into the documented result dict.

        Makes an extra get_instance_view() call to derive the power state from
        the 'PowerState/<state>' status code.
        """
        d = item.as_dict()

        iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
                                                                              vm_scale_set_name=self.vmss_name,
                                                                              instance_id=d.get('instance_id', None)).as_dict()
        power_state = ""
        for status in iv['statuses']:
            code = status['code'].split('/')
            if code[0] == 'PowerState':
                power_state = code[1]
                break
        d = {
            'resource_group': self.resource_group,
            'id': d.get('id', None),
            'tags': d.get('tags', None),
            'instance_id': d.get('instance_id', None),
            'latest_model': d.get('latest_model_applied', None),
            'name': d.get('name', None),
            'provisioning_state': d.get('provisioning_state', None),
            'power_state': power_state,
            'vm_id': d.get('vm_id', None),
            # robustness fix: storage_profile/os_profile may be absent from the
            # dict; guard with `or {}` instead of crashing with AttributeError
            'image_reference': (d.get('storage_profile') or {}).get('image_reference', None),
            'computer_name': (d.get('os_profile') or {}).get('computer_name', None)
        }
        return d


def main():
    """Module entry point; the base class runs exec_module from __init__."""
    AzureRMVirtualMachineScaleSetVMInfo()


if __name__ == '__main__':
    main()
try:
    from azure.core.exceptions import ResourceNotFoundError
except Exception:
    # This is handled in azure_rm_common
    pass

from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase

AZURE_OBJECT_CLASS = 'VirtualMachineSize'

AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']


class AzureRMVirtualMachineSizeInfo(AzureRMModuleBase):
    """Info module: list the VM size profiles available in a location.

    When the optional ``name`` parameter is set, the listing is narrowed to
    the single size with that exact name.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            location=dict(type='str', required=True),
            name=dict(type='str')
        )

        self.results = dict(
            changed=False,
            sizes=[]
        )

        self.location = None
        self.name = None

        super(AzureRMVirtualMachineSizeInfo, self).__init__(self.module_arg_spec,
                                                            supports_check_mode=True,
                                                            supports_tags=False,
                                                            facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: populate self.results['sizes'] and return the results dict."""
        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])

        self.results['sizes'] = self.list_items_by_location()
        return self.results

    def list_items_by_location(self):
        """Fetch all size profiles for the location, keeping only the named one if set."""
        self.log('List items by location')
        try:
            all_sizes = self.compute_client.virtual_machine_sizes.list(location=self.location)
        except ResourceNotFoundError as error:
            self.fail("Failed to list items - {0}".format(str(error)))
        # a name of None means "no filter" — keep every size the API returned
        wanted = (size for size in all_sizes if self.name is None or self.name == size.name)
        return [self.serialize_size(size) for size in wanted]

    def serialize_size(self, size):
        '''
        Convert a VirtualMachineSize object to dict.

        :param size: VirtualMachineSize object
        :return: dict
        '''
        return self.serialize_obj(size, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)


def main():
    """Module entry point; the base class runs exec_module from __init__."""
    AzureRMVirtualMachineSizeInfo()


if __name__ == '__main__':
    main()
+ - Existing DNS servers will be replaced with the specified list. + - Use the I(purge_dns_servers) option to remove all custom DNS servers and revert to default Azure servers. + location: + description: + - Valid Azure location. Defaults to location of the resource group. + name: + description: + - Name of the virtual network. + required: true + purge_address_prefixes: + description: + - Use with I(state=present) to remove any existing I(address_prefixes). + type: bool + default: 'no' + aliases: + - purge + purge_dns_servers: + description: + - Use with I(state=present) to remove existing DNS servers, reverting to default Azure servers. Mutually exclusive with DNS servers. + type: bool + default: 'no' + flow_timeout_in_minutes: + description: + - The FlowTimeout value (in minutes) for the Virtual Network. + type: int + state: + description: + - State of the virtual network. Use C(present) to create or update and C(absent) to delete. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Create a virtual network + azure_rm_virtualnetwork: + resource_group: myResourceGroup + name: myVirtualNetwork + address_prefixes_cidr: + - "10.1.0.0/16" + - "172.100.0.0/16" + dns_servers: + - "127.0.0.1" + - "127.0.0.2" + tags: + testing: testing + delete: on-exit + + - name: Delete a virtual network + azure_rm_virtualnetwork: + resource_group: myResourceGroup + name: myVirtualNetwork + state: absent +''' +RETURN = ''' +state: + description: + - Current state of the virtual network. + returned: always + type: complex + contains: + address_prefixes: + description: + - The virtual network IPv4 address ranges. + returned: always + type: list + sample: [ + "10.1.0.0/16", + "172.100.0.0/16" + ] + dns_servers: + description: + - DNS servers. 
+ returned: always + type: list + sample: [ + "127.0.0.1", + "127.0.0.3" + ] + etag: + description: + - A unique read-only string that changes whenever the resource is update. + returned: always + type: str + sample: 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"' + id: + description: + - Resource ID. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/ + Microsoft.Network/virtualNetworks/myVirtualNetwork" + location: + description: + - The Geo-location where the resource lives. + returned: always + type: str + sample: eastus + name: + description: + - Resource name. + returned: always + type: str + sample: my_test_network + provisioning_state: + description: + - Provisioning state of the virtual network. + returned: always + type: str + sample: Succeeded + tags: + description: + - Resource tags, such as { 'tags1':'value1' }. + returned: always + type: dict + sample: { 'key1':'value1' } + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/virtualNetworks + flow_timeout_in_minutes: + description: + - The FlowTimeout value (in minutes) for the Virtual Network. + type: int + returned: always + sample: 8 +''' + +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN + + +def virtual_network_to_dict(vnet): + ''' + Convert a virtual network object to a dict. 
def virtual_network_to_dict(vnet):
    '''
    Convert a virtual network object to a dict.

    dns_servers and address_prefixes keys are only present in the result when
    the vnet actually has DHCP options / an address space.

    :param vnet: VirtualNet object
    :return: dict
    '''
    results = dict(
        id=vnet.id,
        name=vnet.name,
        location=vnet.location,
        type=vnet.type,
        tags=vnet.tags,
        provisioning_state=vnet.provisioning_state,
        flow_timeout_in_minutes=vnet.flow_timeout_in_minutes,
        etag=vnet.etag
    )
    if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
        results['dns_servers'] = []
        for server in vnet.dhcp_options.dns_servers:
            results['dns_servers'].append(server)
    if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
        results['address_prefixes'] = []
        for space in vnet.address_space.address_prefixes:
            results['address_prefixes'].append(space)
    return results


class AzureRMVirtualNetwork(AzureRMModuleBase):
    """Manage (create/update/delete) an Azure virtual network."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str'),
            address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
            dns_servers=dict(type='list',),
            purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
            purge_dns_servers=dict(type='bool', default=False),
            flow_timeout_in_minutes=dict(type='int'),
        )

        # explicit DNS list and "purge all DNS" cannot both be requested
        mutually_exclusive = [
            ('dns_servers', 'purge_dns_servers')
        ]

        # purging prefixes only makes sense when a replacement list is supplied
        required_if = [
            ('purge_address_prefixes', True, ['address_prefixes_cidr'])
        ]

        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None
        self.address_prefixes_cidr = None
        self.purge_address_prefixes = None
        self.dns_servers = None
        self.purge_dns_servers = None
        self.flow_timeout_in_minutes = None

        self.results = dict(
            changed=False,
            state=dict()
        )

        super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
                                                    mutually_exclusive=mutually_exclusive,
                                                    required_if=required_if,
                                                    supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Entry point: diff desired vs. actual vnet state, then create/update/delete.

        Populates self.results['changed'] and self.results['state'] (the
        curated vnet dict) and returns self.results. In check mode it returns
        after the diff without calling the API mutators.
        """

        # 'tags' is handled by the base class and is not in module_arg_spec
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        # NOTE(review): CIDR validation only runs when purge_address_prefixes
        # is set; prefixes supplied without purge are passed to the API unchecked.
        if self.state == 'present' and self.purge_address_prefixes:
            for prefix in self.address_prefixes_cidr:
                if not CIDR_PATTERN.match(prefix):
                    self.fail("Parameter error: invalid address prefix value {0}".format(prefix))

        changed = False
        results = dict()

        try:
            self.log('Fetching vnet {0}'.format(self.name))
            vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)

            # vnet exists: diff each managed property against the request,
            # accumulating the desired end state into `results`
            results = virtual_network_to_dict(vnet)
            self.log('Vnet exists {0}'.format(self.name))
            self.log(results, pretty_print=True)
            self.check_provisioning_state(vnet, self.state)

            if self.state == 'present':
                if self.address_prefixes_cidr:
                    existing_address_prefix_set = set(vnet.address_space.address_prefixes)
                    requested_address_prefix_set = set(self.address_prefixes_cidr)
                    missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
                    extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
                    if len(missing_prefixes) > 0:
                        self.log('CHANGED: there are missing address_prefixes')
                        changed = True
                        if not self.purge_address_prefixes:
                            # add the missing prefixes
                            for prefix in missing_prefixes:
                                results['address_prefixes'].append(prefix)

                    if len(extra_prefixes) > 0 and self.purge_address_prefixes:
                        self.log('CHANGED: there are address_prefixes to purge')
                        changed = True
                        # replace existing address prefixes with requested set
                        results['address_prefixes'] = self.address_prefixes_cidr

                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True

                if self.dns_servers:
                    # requested DNS list is an exact replacement, so compare as sets
                    existing_dns_set = set(vnet.dhcp_options.dns_servers) if vnet.dhcp_options else set([])
                    requested_dns_set = set(self.dns_servers)
                    if existing_dns_set != requested_dns_set:
                        self.log('CHANGED: replacing DNS servers')
                        changed = True
                        results['dns_servers'] = self.dns_servers

                if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
                    self.log('CHANGED: purging existing DNS servers')
                    changed = True
                    results['dns_servers'] = []

                if self.flow_timeout_in_minutes and self.flow_timeout_in_minutes != vnet.flow_timeout_in_minutes:
                    self.log('CHANGED: Update flow_timeout_in_minutes')
                    changed = True
                    results['flow_timeout_in_minutes'] = self.flow_timeout_in_minutes
                else:
                    # no change requested: carry the current value forward so a
                    # later update does not clear the existing timeout
                    self.flow_timeout_in_minutes = vnet.flow_timeout_in_minutes
            elif self.state == 'absent':
                self.log("CHANGED: vnet exists but requested state is 'absent'")
                changed = True
        except ResourceNotFoundError:
            self.log('Vnet {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
                changed = True

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                if not results:
                    # create a new virtual network
                    self.log("Create virtual network {0}".format(self.name))
                    if not self.address_prefixes_cidr:
                        self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
                    vnet_param = self.network_models.VirtualNetwork(
                        location=self.location,
                        flow_timeout_in_minutes=self.flow_timeout_in_minutes,
                        address_space=self.network_models.AddressSpace(
                            address_prefixes=self.address_prefixes_cidr
                        )
                    )
                    if self.dns_servers:
                        vnet_param.dhcp_options = self.network_models.DhcpOptions(
                            dns_servers=self.dns_servers
                        )
                    if self.tags:
                        vnet_param.tags = self.tags
                    self.results['state'] = self.create_or_update_vnet(vnet_param)
                else:
                    # update existing virtual network
                    self.log("Update virtual network {0}".format(self.name))
                    # build the update payload from the merged `results` dict;
                    # subnets are carried over unchanged from the fetched vnet
                    vnet_param = self.network_models.VirtualNetwork(
                        location=results['location'],
                        address_space=self.network_models.AddressSpace(
                            address_prefixes=results['address_prefixes']
                        ),
                        tags=results['tags'],
                        subnets=vnet.subnets
                    )
                    if results.get('dns_servers'):
                        vnet_param.dhcp_options = self.network_models.DhcpOptions(
                            dns_servers=results['dns_servers']
                        )
                    if self.flow_timeout_in_minutes:
                        vnet_param.flow_timeout_in_minutes = self.flow_timeout_in_minutes
                    self.results['state'] = self.create_or_update_vnet(vnet_param)
            elif self.state == 'absent':
                self.delete_virtual_network()
                self.results['state']['status'] = 'Deleted'

        return self.results

    def create_or_update_vnet(self, vnet):
        """Send the create/update request, wait for the LRO, and return the curated dict."""
        try:
            poller = self.network_client.virtual_networks.begin_create_or_update(self.resource_group, self.name, vnet)
            new_vnet = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
        return virtual_network_to_dict(new_vnet)

    def delete_virtual_network(self):
        """Delete the virtual network and wait for the long-running operation."""
        try:
            poller = self.network_client.virtual_networks.begin_delete(self.resource_group, self.name)
            result = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
        return result


def main():
    # Module entry point; the base class runs exec_module from __init__.
    AzureRMVirtualNetwork()


if __name__ == '__main__':
    main()
+from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualnetwork_info + +version_added: "0.0.1" + +short_description: Get virtual network facts + +description: + - Get facts for a specific virtual network or all virtual networks within a resource group. + +options: + name: + description: + - Only show results for a specific virtual network. + resource_group: + description: + - Limit results by resource group. Required when filtering by name. + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Chris Houseknecht (@chouseknecht) + - Matt Davis (@nitzmahone) + +''' + +EXAMPLES = ''' + - name: Get facts for one virtual network + azure_rm_virtualnetwork_info: + resource_group: myResourceGroup + name: secgroup001 + + - name: Get facts for all virtual networks + azure_rm_virtualnetwork_info: + resource_group: myResourceGroup + + - name: Get facts by tags + azure_rm_virtualnetwork_info: + tags: + - testing +''' +RETURN = ''' +azure_virtualnetworks: + description: + - List of virtual network dicts. + returned: always + type: list + example: [{ + "etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"', + "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001", + "location": "eastus2", + "name": "vnet2001", + "properties": { + "addressSpace": { + "addressPrefixes": [ + "10.10.0.0/16" + ] + }, + "provisioningState": "Succeeded", + "resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612", + "subnets": [] + }, + "type": "Microsoft.Network/virtualNetworks" + }] +virtualnetworks: + description: + - List of virtual network dicts with same format as M(azure.azcollection.azure_rm_virtualnetwork) module parameters. 
+ returned: always + type: complex + contains: + id: + description: + - Resource ID of the virtual network. + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001 + returned: always + type: str + address_prefixes: + description: + - List of IPv4 address ranges where each is formatted using CIDR notation. + sample: ["10.10.0.0/16"] + returned: always + type: list + dns_servers: + description: + - Custom list of DNS servers. + returned: always + type: list + sample: ["www.azure.com"] + location: + description: + - Valid Azure location. + returned: always + type: str + sample: eastus + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. + returned: always + type: dict + sample: { "tag1": "abc" } + provisioning_state: + description: + - Provisioning state of the resource. + returned: always + sample: Succeeded + type: str + flow_timeout_in_minutes: + description: + - The FlowTimeout value (in minutes) for the Virtual Network. + type: int + returned: always + sample: 8 + name: + description: + - Name of the virtual network. + returned: always + type: str + sample: foo + subnets: + description: + - Subnets associated with the virtual network. + returned: always + type: list + contains: + id: + description: + - Resource ID of the subnet. + returned: always + type: str + sample: "/subscriptions/f64d4ee8-be94-457d-ba26-3fa6b6506cef/resourceGroups/v-xisuRG/providers/ + Microsoft.Network/virtualNetworks/vnetb57dc95232/subnets/vnetb57dc95232" + name: + description: + - Name of the subnet. + returned: always + type: str + sample: vnetb57dc95232 + provisioning_state: + description: + - Provisioning state of the subnet. + returned: always + type: str + sample: Succeeded + address_prefix: + description: + - The address prefix for the subnet. 
try:
    from azure.core.exceptions import ResourceNotFoundError
except Exception:
    # This is handled in azure_rm_common
    pass

from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase


AZURE_OBJECT_CLASS = 'VirtualNetwork'


class AzureRMNetworkInterfaceInfo(AzureRMModuleBase):
    """Info module: gather facts for one virtual network, a resource group's
    virtual networks, or every virtual network in the subscription."""

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list', elements='str'),
        )

        self.results = dict(
            changed=False,
            virtualnetworks=[]
        )

        # filtering by name requires a resource group to look the vnet up in
        self.required_if = [('name', '*', ['resource_group'])]

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMNetworkInterfaceInfo, self).__init__(self.module_arg_spec,
                                                          supports_check_mode=True,
                                                          supports_tags=False,
                                                          facts_module=True,
                                                          required_if=self.required_if)

    def exec_module(self, **kwargs):
        """Entry point: pick the narrowest lookup and populate self.results."""
        legacy_name = self.module._name == 'azure_rm_virtualnetwork_facts'
        if legacy_name:
            self.module.deprecate("The 'azure_rm_virtualnetwork_facts' module has been renamed to 'azure_rm_virtualnetwork_info'", version=(2.9, ))

        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])

        # narrowest lookup wins: name > resource group > whole subscription
        if self.name is not None:
            raw_vnets = self.get_item()
        elif self.resource_group is not None:
            raw_vnets = self.list_resource_group()
        else:
            raw_vnets = self.list_items()

        if legacy_name:
            # legacy facts modules also exposed the raw serialized objects
            self.results['ansible_facts'] = {
                'azure_virtualnetworks': self.serialize(raw_vnets)
            }
        self.results['virtualnetworks'] = self.curated(raw_vnets)

        return self.results

    def get_item(self):
        """Fetch a single vnet by name; empty list when absent or tag-filtered out."""
        self.log('Get properties for {0}'.format(self.name))
        found = None

        try:
            found = self.network_client.virtual_networks.get(resource_group_name=self.resource_group,
                                                             virtual_network_name=self.name)
        except ResourceNotFoundError:
            # a missing vnet is not an error for a facts module
            pass

        if found and self.has_tags(found.tags, self.tags):
            return [found]
        return []

    def list_resource_group(self):
        """List every vnet in the resource group that matches the tag filter."""
        self.log('List items for resource group')
        try:
            response = self.network_client.virtual_networks.list(self.resource_group)
        except ResourceNotFoundError as exc:
            self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))

        return [vnet for vnet in response if self.has_tags(vnet.tags, self.tags)]

    def list_items(self):
        """List every vnet in the subscription that matches the tag filter."""
        self.log('List all for items')
        try:
            response = self.network_client.virtual_networks.list_all()
        except ResourceNotFoundError as exc:
            self.fail("Failed to list all items - {0}".format(str(exc)))

        return [vnet for vnet in response if self.has_tags(vnet.tags, self.tags)]

    def serialize(self, raws):
        """Serialize raw SDK objects for the legacy ansible_facts output."""
        self.log("Serialize all items")
        if not raws:
            return []
        return [self.serialize_obj(vnet, AZURE_OBJECT_CLASS) for vnet in raws]

    def curated(self, raws):
        """Convert raw SDK objects to the documented flat dict format."""
        self.log("Format all items")
        if not raws:
            return []
        return [self.virtualnetwork_to_dict(vnet) for vnet in raws]

    def virtualnetwork_to_dict(self, vnet):
        """Flatten one VirtualNetwork object; optional keys (dns_servers,
        address_prefixes, subnets) appear only when the vnet has them."""
        info = dict(
            id=vnet.id,
            name=vnet.name,
            location=vnet.location,
            tags=vnet.tags,
            provisioning_state=vnet.provisioning_state,
            flow_timeout_in_minutes=vnet.flow_timeout_in_minutes
        )
        if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
            info['dns_servers'] = list(vnet.dhcp_options.dns_servers)
        if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
            info['address_prefixes'] = list(vnet.address_space.address_prefixes)
        if vnet.subnets and len(vnet.subnets) > 0:
            info['subnets'] = [self.subnet_to_dict(sn) for sn in vnet.subnets]
        return info

    def subnet_to_dict(self, subnet):
        """Flatten one Subnet object, reducing NSG/route-table references to their IDs."""
        nsg = subnet.network_security_group
        route_table = subnet.route_table
        result = dict(
            id=subnet.id,
            name=subnet.name,
            provisioning_state=subnet.provisioning_state,
            address_prefix=subnet.address_prefix,
            address_prefixes=subnet.address_prefixes if subnet.address_prefixes else None,
            network_security_group=nsg.id if nsg else None,
            route_table=route_table.id if route_table else None
        )
        if subnet.service_endpoints:
            result['service_endpoints'] = [
                {'service': endpoint.service, 'locations': endpoint.locations}
                for endpoint in subnet.service_endpoints
            ]
        return result


def main():
    """Module entry point; the base class runs exec_module from __init__."""
    AzureRMNetworkInterfaceInfo()


if __name__ == '__main__':
    main()
gateways + +description: + - Create, update or delete a virtual network gateway(VPN Gateway). + - When creating a VPN Gateway you must provide the name of an existing virtual network. + +options: + resource_group: + description: + - Name of a resource group where VPN Gateway exists or will be created. + required: true + name: + description: + - Name of VPN Gateway. + required: true + state: + description: + - State of the VPN Gateway. Use C(present) to create or update VPN gateway and C(absent) to delete VPN gateway. + default: present + choices: + - absent + - present + required: false + location: + description: + - Valid Azure location. Defaults to location of the resource group. + required: false + virtual_network: + description: + - An existing virtual network with which the VPN Gateway will be associated. + - Required when creating a VPN Gateway. + - Can be the name of the virtual network. + - Must be in the same resource group as VPN gateway when specified by name. + - Can be the resource ID of the virtual network. + - Can be a dict which contains I(name) and I(resource_group) of the virtual network. + aliases: + - virtual_network_name + required: true + ip_configurations: + description: + - List of IP configurations. + suboptions: + name: + description: + - Name of the IP configuration. + required: true + private_ip_allocation_method: + description: + - Private IP allocation method. + choices: + - dynamic + - static + default: dynamic + public_ip_address_name: + description: + - Name of the public IP address. Use 'None' to disable the public IP address. + subnet: + description: + - ID of the gateway subnet for VPN. + default: GatewaySubnet + gateway_type: + description: + - The type of this virtual network gateway. + default: vpn + choices: + - vpn + - express_route + vpn_type: + description: + - The type of this virtual private network. 
        default: route_based
        choices:
            - route_based
            - policy_based
    enable_bgp:
        description:
            - Whether BGP is enabled for this virtual network gateway or not.
        default: false
    sku:
        description:
            - The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
        default: VpnGw1
        choices:
            - VpnGw1
            - VpnGw2
            - VpnGw3
            - Standard
            - Basic
            - HighPerformance
    vpn_gateway_generation:
        description:
            - The generation for this VirtualNetworkGateway. Must be C(None) if C(gateway_type) is not VPN.
        default: Generation1
        choices:
            - None
            - Generation1
            - Generation2
    bgp_settings:
        description:
            - Virtual network gateway's BGP speaker settings.
        suboptions:
            asn:
                description:
                    - The BGP speaker's ASN.
                required: True

extends_documentation_fragment:
    - azure.azcollection.azure
    - azure.azcollection.azure_tags

author:
    - Madhura Naniwadekar (@Madhura-CSI)
'''

EXAMPLES = '''
    - name: Create virtual network gateway without bgp settings
      azure_rm_virtualnetworkgateway:
        resource_group: myResourceGroup
        name: myVirtualNetworkGateway
        ip_configurations:
          - name: testipconfig
            private_ip_allocation_method: Dynamic
            public_ip_address_name: testipaddr
        virtual_network: myVirtualNetwork
        tags:
          common: "xyz"

    - name: Create virtual network gateway Generation2
      azure_rm_virtualnetworkgateway:
        resource_group: myResourceGroup
        name: myVirtualNetworkGateway
        sku: VpnGw2
        vpn_gateway_generation: Generation2
        ip_configurations:
          - name: testipconfig
            private_ip_allocation_method: Dynamic
            public_ip_address_name: testipaddr
        virtual_network: myVirtualNetwork
        tags:
          common: "xyz"

    - name: Create virtual network gateway with bgp
      azure_rm_virtualnetworkgateway:
        resource_group: myResourceGroup
        name: myVirtualNetworkGateway
        sku: VpnGw1
        ip_configurations:
          - name: testipconfig
            private_ip_allocation_method: Dynamic
            public_ip_address_name: testipaddr
def vgw_to_dict(vgw):
    """Serialize a VirtualNetworkGateway SDK object into a plain dict.

    ``bgp_settings`` is None when the gateway has no BGP configuration,
    otherwise a dict of asn / bgp_peering_address / peer_weight.
    """
    bgp = None
    if vgw.bgp_settings:
        bgp = {
            'asn': vgw.bgp_settings.asn,
            'bgp_peering_address': vgw.bgp_settings.bgp_peering_address,
            'peer_weight': vgw.bgp_settings.peer_weight,
        }
    return {
        'id': vgw.id,
        'name': vgw.name,
        'location': vgw.location,
        'gateway_type': vgw.gateway_type,
        'vpn_type': vgw.vpn_type,
        'vpn_gateway_generation': vgw.vpn_gateway_generation,
        'enable_bgp': vgw.enable_bgp,
        'tags': vgw.tags,
        'provisioning_state': vgw.provisioning_state,
        'sku': {'name': vgw.sku.name, 'tier': vgw.sku.tier},
        'bgp_settings': bgp,
        'etag': vgw.etag,
    }
self.network_client.virtual_network_gateways.get(self.resource_group, self.name) + if self.state == 'absent': + self.log("CHANGED: vnet exists but requested state is 'absent'") + changed = True + except ResourceNotFoundError: + if self.state == 'present': + self.log("CHANGED: VPN Gateway {0} does not exist but requested state is 'present'".format(self.name)) + changed = True + + if vgw: + results = vgw_to_dict(vgw) + if self.state == 'present': + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + sku = dict(name=self.sku, tier=self.sku) + if sku != results['sku']: + changed = True + if self.enable_bgp != results['enable_bgp']: + changed = True + if self.bgp_settings and self.bgp_settings['asn'] != results['bgp_settings']['asn']: + changed = True + + self.results['changed'] = changed + self.results['id'] = results.get('id') + + if self.check_mode: + return self.results + if changed: + if self.state == 'present': + if not self.sku: + self.fail('Parameter error: sku is required when creating a vpn gateway') + if not self.ip_configurations: + self.fail('Parameter error: ip_configurations required when creating a vpn gateway') + subnet = self.network_models.SubResource( + id='/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/GatewaySubnet'.format( + self.virtual_network['subscription_id'], + self.virtual_network['resource_group'], + self.virtual_network['name'])) + + public_ip_address = self.network_models.SubResource( + id='/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/publicIPAddresses/{2}'.format( + self.virtual_network['subscription_id'], + self.virtual_network['resource_group'], + self.ip_configurations[0]['public_ip_address_name'])) + + vgw_ip_configurations = [ + self.network_models.VirtualNetworkGatewayIPConfiguration( + private_ip_allocation_method=ip_config.get('private_ip_allocation_method'), + subnet=subnet, + 
    def create_or_update_vgw(self, vgw):
        """Submit the gateway to Azure and block until provisioning finishes.

        ``begin_create_or_update`` is a long-running operation; we wait on
        the returned poller so callers get the final state, not an LRO handle.

        :param vgw: a ``VirtualNetworkGateway`` model describing the desired state.
        :return: dict representation of the provisioned gateway (via ``vgw_to_dict``).
        :raises: fails the module (does not raise to callers) on any SDK error.
        """
        try:
            poller = self.network_client.virtual_network_gateways.begin_create_or_update(self.resource_group, self.name, vgw)
            new_vgw = self.get_poller_result(poller)
            return vgw_to_dict(new_vgw)
        except Exception as exc:
            self.fail("Error creating or updating virtual network gateway {0} - {1}".format(self.name, str(exc)))
b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkpeering.py @@ -0,0 +1,467 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualnetworkpeering +version_added: "0.0.1" +short_description: Manage Azure Virtual Network Peering +description: + - Create, update and delete Azure Virtual Network Peering. + +options: + resource_group: + description: + - Name of a resource group where the vnet exists. + required: true + name: + description: + - Name of the virtual network peering. + required: true + virtual_network: + description: + - Name or resource ID of the virtual network to be peered. + required: true + remote_virtual_network: + description: + - Remote virtual network to be peered. + - It can be name of remote virtual network in same resource group. + - It can be remote virtual network resource ID. + - It can be a dict which contains I(name) and I(resource_group) of remote virtual network. + - Required when creating. + allow_virtual_network_access: + description: + - Allows VMs in the remote VNet to access all VMs in the local VNet. + type: bool + default: false + allow_forwarded_traffic: + description: + - Allows forwarded traffic from the VMs in the remote VNet. + type: bool + default: false + use_remote_gateways: + description: + - If remote gateways can be used on this virtual network. + type: bool + default: false + allow_gateway_transit: + description: + - Allows VNet to use the remote VNet's gateway. Remote VNet gateway must have --allow-gateway-transit enabled for remote peering. + - Only 1 peering can have this flag enabled. Cannot be set if the VNet already has a gateway. + type: bool + default: false + state: + description: + - State of the virtual network peering. 
Use C(present) to create or update a peering and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Create virtual network peering + azure_rm_virtualnetworkpeering: + resource_group: myResourceGroup + virtual_network: myVirtualNetwork + name: myPeering + remote_virtual_network: + resource_group: mySecondResourceGroup + name: myRemoteVirtualNetwork + allow_virtual_network_access: false + allow_forwarded_traffic: true + + - name: Delete the virtual network peering + azure_rm_virtualnetworkpeering: + resource_group: myResourceGroup + virtual_network: myVirtualNetwork + name: myPeering + state: absent +''' +RETURN = ''' +id: + description: + - ID of the Azure virtual network peering. + returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualN + etwork/virtualNetworkPeerings/myPeering" +peering_sync_level: + description: + - The Sync Level of the Peering + type: str + returned: always + sample: "FullyInSync" +''' + +try: + from msrestazure.tools import is_valid_resource_id + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id + + +def virtual_network_to_dict(vnet): + ''' + Convert a virtual network object to a dict. 
def vnetpeering_to_dict(vnetpeering):
    '''
    Convert a virtual network peering object to a dict.
    '''
    remote_space = {
        'address_prefixes': vnetpeering.remote_address_space.address_prefixes,
    }
    return {
        'id': vnetpeering.id,
        'name': vnetpeering.name,
        'remote_virtual_network': vnetpeering.remote_virtual_network.id,
        'remote_address_space': remote_space,
        'peering_state': vnetpeering.peering_state,
        'provisioning_state': vnetpeering.provisioning_state,
        'use_remote_gateways': vnetpeering.use_remote_gateways,
        'allow_gateway_transit': vnetpeering.allow_gateway_transit,
        'allow_forwarded_traffic': vnetpeering.allow_forwarded_traffic,
        'allow_virtual_network_access': vnetpeering.allow_virtual_network_access,
        'etag': vnetpeering.etag,
        'peering_sync_level': vnetpeering.peering_sync_level,
    }
default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.virtual_network = None + self.remote_virtual_network = None + self.allow_virtual_network_access = None + self.allow_forwarded_traffic = None + self.allow_gateway_transit = None + self.use_remote_gateways = None + + self.results = dict(changed=False) + + super(AzureRMVirtualNetworkPeering, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()): + setattr(self, key, kwargs[key]) + + to_be_updated = False + to_be_synced = False + + resource_group = self.get_resource_group(self.resource_group) + + # parse virtual_network + self.virtual_network = self.parse_resource_to_dict(self.virtual_network) + if self.virtual_network['resource_group'] != self.resource_group: + self.fail('Resource group of virtual_network is not same as param resource_group') + + # parse remote virtual_network + self.remote_virtual_network = self.format_vnet_id(self.remote_virtual_network) + + # get vnet peering + response = self.get_vnet_peering() + + if self.state == 'present': + if response: + # check vnet id not changed + existing_vnet = self.parse_resource_to_dict(response['id']) + if existing_vnet['resource_group'] != self.virtual_network['resource_group'] or \ + existing_vnet['name'] != self.virtual_network['name']: + self.fail("Cannot update virtual_network of Virtual Network Peering!") + + # check remote vnet id not changed + if response['remote_virtual_network'].lower() != self.remote_virtual_network.lower(): + self.fail("Cannot update remote_virtual_network of Virtual Network Peering!") + + # check if update + to_be_updated = self.check_update(response) + to_be_synced = self.check_sync(response) + + else: + # not exists, create new vnet peering + to_be_updated = True + + # check if vnet exists + 
virtual_network = self.get_vnet(self.virtual_network['resource_group'], self.virtual_network['name']) + if not virtual_network: + self.fail("Virtual network {0} in resource group {1} does not exist!".format( + self.virtual_network['name'], self.virtual_network['resource_group'])) + + elif self.state == 'absent': + if response: + self.log('Delete Azure Virtual Network Peering') + self.results['changed'] = True + self.results['id'] = response['id'] + + if self.check_mode: + return self.results + + response = self.delete_vnet_peering() + + else: + self.log("Azure Virtual Network Peering {0} does not exist in resource group {1}".format(self.name, self.resource_group)) + + if to_be_updated: + self.results['changed'] = True + + if self.check_mode: + return self.results + + response = self.create_or_update_vnet_peering() + self.results['id'] = response['id'] + to_be_synced = self.check_sync(response) + + if to_be_synced: + self.results['changed'] = True + + if self.check_mode: + return self.results + sync_response = self.sync_vnet_peering() + self.results['peering_sync_level'] = sync_response['peering_sync_level'] + + return self.results + + def format_vnet_id(self, vnet): + if not vnet: + return vnet + if isinstance(vnet, dict) and vnet.get('name') and vnet.get('resource_group'): + remote_vnet_id = format_resource_id(vnet['name'], + self.subscription_id, + 'Microsoft.Network', + 'virtualNetworks', + vnet['resource_group']) + elif isinstance(vnet, str): + if is_valid_resource_id(vnet): + remote_vnet_id = vnet + else: + remote_vnet_id = format_resource_id(vnet, + self.subscription_id, + 'Microsoft.Network', + 'virtualNetworks', + self.resource_group) + else: + self.fail("remote_virtual_network could be a valid resource id, dict of name and resource_group, name of virtual network in same resource group.") + return remote_vnet_id + + def check_sync(self, exisiting_vnet_peering): + if exisiting_vnet_peering['peering_sync_level'] == 'LocalNotInSync': + return True + return 
False + + def check_update(self, exisiting_vnet_peering): + if self.allow_forwarded_traffic != exisiting_vnet_peering['allow_forwarded_traffic']: + return True + if self.allow_gateway_transit != exisiting_vnet_peering['allow_gateway_transit']: + return True + if self.allow_virtual_network_access != exisiting_vnet_peering['allow_virtual_network_access']: + return True + if self.use_remote_gateways != exisiting_vnet_peering['use_remote_gateways']: + return True + return False + + def get_vnet(self, resource_group, vnet_name): + ''' + Get Azure Virtual Network + :return: deserialized Azure Virtual Network + ''' + self.log("Get the Azure Virtual Network {0}".format(vnet_name)) + vnet = self.network_client.virtual_networks.get(resource_group, vnet_name) + + if vnet: + results = virtual_network_to_dict(vnet) + return results + return False + + def sync_vnet_peering(self): + ''' + Creates or Update Azure Virtual Network Peering. + + :return: deserialized Azure Virtual Network Peering instance state dictionary + ''' + self.log("Creating or Updating the Azure Virtual Network Peering {0}".format(self.name)) + + vnet_id = format_resource_id(self.virtual_network['name'], + self.subscription_id, + 'Microsoft.Network', + 'virtualNetworks', + self.virtual_network['resource_group']) + peering = self.network_models.VirtualNetworkPeering( + id=vnet_id, + name=self.name, + remote_virtual_network=self.network_models.SubResource(id=self.remote_virtual_network), + allow_virtual_network_access=self.allow_virtual_network_access, + allow_gateway_transit=self.allow_gateway_transit, + allow_forwarded_traffic=self.allow_forwarded_traffic, + use_remote_gateways=self.use_remote_gateways + ) + + try: + response = self.network_client.virtual_network_peerings.begin_create_or_update(self.resource_group, + self.virtual_network['name'], + self.name, + peering, + sync_remote_address_space=True) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + return 
vnetpeering_to_dict(response) + except Exception as exc: + self.fail("Error creating Azure Virtual Network Peering: {0}.".format(exc.message)) + + def create_or_update_vnet_peering(self): + ''' + Creates or Update Azure Virtual Network Peering. + + :return: deserialized Azure Virtual Network Peering instance state dictionary + ''' + self.log("Creating or Updating the Azure Virtual Network Peering {0}".format(self.name)) + + vnet_id = format_resource_id(self.virtual_network['name'], + self.subscription_id, + 'Microsoft.Network', + 'virtualNetworks', + self.virtual_network['resource_group']) + peering = self.network_models.VirtualNetworkPeering( + id=vnet_id, + name=self.name, + remote_virtual_network=self.network_models.SubResource(id=self.remote_virtual_network), + allow_virtual_network_access=self.allow_virtual_network_access, + allow_gateway_transit=self.allow_gateway_transit, + allow_forwarded_traffic=self.allow_forwarded_traffic, + use_remote_gateways=self.use_remote_gateways + ) + + try: + response = self.network_client.virtual_network_peerings.begin_create_or_update(self.resource_group, + self.virtual_network['name'], + self.name, + peering) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + return vnetpeering_to_dict(response) + except Exception as exc: + self.fail("Error creating Azure Virtual Network Peering: {0}.".format(exc.message)) + + def delete_vnet_peering(self): + ''' + Deletes the specified Azure Virtual Network Peering + + :return: True + ''' + self.log("Deleting Azure Virtual Network Peering {0}".format(self.name)) + try: + poller = self.network_client.virtual_network_peerings.begin_delete( + self.resource_group, self.virtual_network['name'], self.name) + self.get_poller_result(poller) + return True + except Exception as e: + self.fail("Error deleting the Azure Virtual Network Peering: {0}".format(e.message)) + return False + + def get_vnet_peering(self): + ''' + Gets the Virtual Network Peering. 
+ + :return: deserialized Virtual Network Peering + ''' + self.log( + "Checking if Virtual Network Peering {0} is present".format(self.name)) + try: + response = self.network_client.virtual_network_peerings.get(self.resource_group, + self.virtual_network['name'], + self.name) + self.log("Response : {0}".format(response)) + + return vnetpeering_to_dict(response) + except ResourceNotFoundError: + self.log('Did not find the Virtual Network Peering.') + return False + + +def main(): + """Main execution""" + AzureRMVirtualNetworkPeering() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkpeering_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkpeering_info.py new file mode 100644 index 000000000..87563f01a --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkpeering_info.py @@ -0,0 +1,260 @@ +#!/usr/bin/python +# +# Copyright (c) 2019 Yunge Zhu (@yungezz) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualnetworkpeering_info +version_added: "0.0.1" +short_description: Get facts of Azure Virtual Network Peering +description: + - Get facts of Azure Virtual Network Peering. + +options: + resource_group: + description: + - Name of a resource group where the vnet exists. + required: True + virtual_network: + description: + - Name or resource ID of a virtual network. + required: True + name: + description: + - Name of the virtual network peering. 

extends_documentation_fragment:
    - azure.azcollection.azure

author:
    - Yunge Zhu (@yungezz)
'''

EXAMPLES = '''
  - name: Get virtual network peering by name
    azure_rm_virtualnetworkpeering_info:
      resource_group: myResourceGroup
      virtual_network: myVnet1
      name: myVnetPeer

  - name: List virtual network peering of virtual network
    azure_rm_virtualnetworkpeering_info:
      resource_group: myResourceGroup
      virtual_network: myVnet1
'''

RETURN = '''
vnetpeerings:
    description:
        - A list of Virtual Network Peering facts.
    returned: always
    type: complex
    contains:
        id:
            description: ID of current Virtual Network peering.
            returned: always
            type: str
            sample:
                "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/virtualNetworkPeerings/peer1"
        name:
            description:
                - Name of Virtual Network peering.
            returned: always
            type: str
            sample: myPeering
        remote_virtual_network:
            description:
                - ID of remote Virtual Network to be peered to.
            returned: always
            type: str
            sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet2
        remote_address_space:
            description:
                - The reference of the remote Virtual Network address space.
            type: complex
            returned: always
            contains:
                address_prefixes:
                    description:
                        - A list of address blocks reserved for this Virtual Network in CIDR notation.
                    returned: always
                    type: list
                    sample: 10.1.0.0/16
        peering_state:
            description:
                - The state of the virtual network peering.
            returned: always
            type: str
            sample: Connected
        peering_sync_level:
            description:
                - The Sync Level of the Peering.
            type: str
            returned: always
            sample: "FullyInSync"
        provisioning_state:
            description:
                - The provisioning state of the resource.
def vnetpeering_to_dict(vnetpeering):
    '''
    Convert a virtual network peering object to a dict.
    '''
    flags = {
        'use_remote_gateways': vnetpeering.use_remote_gateways,
        'allow_gateway_transit': vnetpeering.allow_gateway_transit,
        'allow_forwarded_traffic': vnetpeering.allow_forwarded_traffic,
        'allow_virtual_network_access': vnetpeering.allow_virtual_network_access,
    }
    result = {
        'id': vnetpeering.id,
        'name': vnetpeering.name,
        'remote_virtual_network': vnetpeering.remote_virtual_network.id,
        'remote_address_space': {'address_prefixes': vnetpeering.remote_address_space.address_prefixes},
        'peering_state': vnetpeering.peering_state,
        'provisioning_state': vnetpeering.provisioning_state,
        'peering_sync_level': vnetpeering.peering_sync_level,
    }
    result.update(flags)
    return result
self.results['vnetpeerings'] = self.list_by_vnet() + + return self.results + + def get_by_name(self): + ''' + Gets the Virtual Network Peering. + + :return: List of Virtual Network Peering + ''' + self.log( + "Get Virtual Network Peering {0}".format(self.name)) + results = [] + try: + response = self.network_client.virtual_network_peerings.get(resource_group_name=self.resource_group, + virtual_network_name=self.virtual_network['name'], + virtual_network_peering_name=self.name) + self.log("Response : {0}".format(response)) + results.append(vnetpeering_to_dict(response)) + except ResourceNotFoundError: + self.log('Did not find the Virtual Network Peering.') + return results + + def list_by_vnet(self): + ''' + Lists the Virtual Network Peering in specific Virtual Network. + + :return: List of Virtual Network Peering + ''' + self.log( + "List Virtual Network Peering in Virtual Network {0}".format(self.virtual_network['name'])) + results = [] + try: + response = self.network_client.virtual_network_peerings.list(resource_group_name=self.resource_group, + virtual_network_name=self.virtual_network['name']) + self.log("Response : {0}".format(response)) + if response: + for p in response: + results.append(vnetpeering_to_dict(p)) + except ResourceNotFoundError: + self.log('Did not find the Virtual Network Peering.') + return results + + +def main(): + """Main execution""" + AzureRMVirtualNetworkPeeringInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan.py new file mode 100644 index 000000000..10869012d --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan.py @@ -0,0 +1,399 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import 
absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualwan +version_added: '1.5.0' +short_description: Manage Azure VirtualWan instance +description: + - Create, update and delete instance of Azure VirtualWan. +options: + resource_group: + description: + - The resource group name of the VirtualWan. + required: true + type: str + office365_local_breakout_category: + description: + - Specifies the Office365 local breakout category. + - Default value is C(None). + type: str + choices: + - Optimize + - OptimizeAndAllow + - All + - None + name: + description: + - The name of the VirtualWAN being retrieved. + required: true + type: str + location: + description: + - The virtual wan location. + type: str + disable_vpn_encryption: + description: + - Vpn encryption to be disabled or not. + type: bool + virtual_hubs: + description: + - List of VirtualHubs in the VirtualWAN. + type: list + suboptions: + id: + description: + - The virtual hub resource ID. + type: str + vpn_sites: + description: + - List of VpnSites in the VirtualWAN. + type: list + suboptions: + id: + description: + - The vpn site resource ID. + type: str + allow_branch_to_branch_traffic: + description: + - True if branch to branch traffic is allowed. + type: bool + allow_vnet_to_vnet_traffic: + description: + - C(True) if Vnet to Vnet traffic is allowed. + type: bool + virtual_wan_type: + description: + - The type of the VirtualWAN. + type: str + choices: + - Basic + - Standard + state: + description: + - Assert the state of the VirtualWan. + - Use C(present) to create or update an VirtualWan and C(absent) to delete it. 
+ default: present + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + +''' + +EXAMPLES = ''' + - name: Create a VirtualWan + azure_rm_virtualwan: + resource_group: myResouceGroup + name: testwan + disable_vpn_encryption: true + allow_branch_to_branch_traffic: true + allow_vnet_to_vnet_traffic: true + virtual_wan_type: Standard + + - name: Delete the VirtualWan + azure_rm_virtualwan: + resource_group: myResouceGroup + name: testwan + state: absent + +''' + +RETURN = ''' +state: + description: + - Current state of the virtual wan. + type: complex + returned: success + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualWans/virtual_wan_name + name: + description: + - Resource name. + returned: always + type: str + sample: virtualwanb57dc9555691 + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/virtualWans + location: + description: + - The virtual wan resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'key1': 'value1'} + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 52def36b-84b6-49aa-a825-16ba167fc559 + disable_vpn_encryption: + description: + - Vpn encryption to be disabled or not. + returned: always + type: bool + sample: true + virtual_hubs: + description: + - List of VirtualHubs in the VirtualWAN. + type: complex + returned: always + contains: + id: + description: + - The virtual hubs ID. 
+ type: str + returned: always + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualHubs/test + vpn_sites: + description: + - List of VpnSites in the VirtualWAN. + returned: always + type: list + contains: + id: + description: + - The vpn sites resouce ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/resource_group/providers/Microsoft.Network/vpnSites/test1 + allow_branch_to_branch_traffic: + description: + - True if branch to branch traffic is allowed. + returned: always + type: bool + sample: true + allow_vnet_to_vnet_traffic: + description: + - True if Vnet to Vnet traffic is allowed. + returned: always + type: bool + sample: true + office365_local_breakout_category: + description: + - The office local breakout category. + returned: always + type: str + sample: None + provisioning_state: + description: + - The provisioning state of the virtual WAN resource. + returned: always + type: str + sample: Succeeded + virtual_wan_type: + description: + - The type of the VirtualWAN. 
+ returned: always + type: str + sample: Standard + +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from msrestazure.azure_operation import AzureOperationPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMVirtualWan(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + location=dict( + type='str' + ), + resource_group=dict( + type='str', + required=True + ), + office365_local_breakout_category=dict( + type='str', + choices=['Optimize', 'OptimizeAndAllow', 'All', 'None'] + ), + name=dict( + type='str', + required=True + ), + disable_vpn_encryption=dict( + type='bool', + disposition='/disable_vpn_encryption' + ), + virtual_hubs=dict( + type='list', + updatable=False, + disposition='/virtual_hubs', + options=dict( + id=dict( + type='str', + disposition='id' + ) + ) + ), + vpn_sites=dict( + type='list', + updatable=False, + disposition='/vpn_sites', + options=dict( + id=dict( + type='str', + disposition='id' + ) + ) + ), + allow_branch_to_branch_traffic=dict( + type='bool', + disposition='/allow_branch_to_branch_traffic' + ), + allow_vnet_to_vnet_traffic=dict( + type='bool', + updatable=False, + disposition='/allow_vnet_to_vnet_traffic' + ), + virtual_wan_type=dict( + type='str', + disposition='/virtual_wan_type', + choices=['Basic', 'Standard'] + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.location = None + self.body = {} + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMVirtualWan, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def 
exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + resource_group = self.get_resource_group(self.resource_group) + if self.location is None: + # Set default location + self.location = resource_group.location + self.body['location'] = self.location + + old_response = None + response = None + + old_response = self.get_resource() + + if not old_response: + if self.state == 'present': + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource() + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_resource() + else: + self.results['changed'] = False + response = old_response + if response is not None: + self.results['state'] = response + return self.results + + def create_update_resource(self): + try: + response = self.network_client.virtual_wans.begin_create_or_update(resource_group_name=self.resource_group, + virtual_wan_name=self.name, + wan_parameters=self.body) + if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the VirtualWan instance.') + self.fail('Error creating the VirtualWan instance: {0}'.format(str(exc))) + return 
response.as_dict() + + def delete_resource(self): + try: + response = self.network_client.virtual_wans.begin_delete(resource_group_name=self.resource_group, + virtual_wan_name=self.name) + except Exception as e: + self.log('Error attempting to delete the VirtualWan instance.') + self.fail('Error deleting the VirtualWan instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + try: + response = self.network_client.virtual_wans.get(resource_group_name=self.resource_group, + virtual_wan_name=self.name) + except ResourceNotFoundError as e: + return False + return response.as_dict() + + +def main(): + AzureRMVirtualWan() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan_info.py new file mode 100644 index 000000000..562422129 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualwan_info.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualwan_info +version_added: '1.5.0' +short_description: Get VirtualWan info +description: + - Get info of VirtualWan. +options: + resource_group: + description: + - The resource group name of the VirtualWan. + type: str + name: + description: + - The name of the VirtualWAN being retrieved. 
+ type: str
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+author:
+ - Fred-Sun (@Fred-Sun)
+'''
+
+EXAMPLES = '''
+ - name: Get Virtual WAN by name
+ azure_rm_virtualwan_info:
+ resource_group: myResourceGroup
+ name: testwan
+
+ - name: List all Virtual WANs by resource group
+ azure_rm_virtualwan_info:
+ resource_group: myResourceGroup
+
+ - name: List all Virtual WANs by subscription_id
+ azure_rm_virtualwan_info:
+
+'''
+
+RETURN = '''
+virtual_wans:
+ description:
+ - A list of dict results where the key is the name of the VirtualWan and the values are the facts for that VirtualWan.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualWans/testwan
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: testwan
+ type:
+ description:
+ - Resource type.
+ returned: always
+ type: str
+ sample: Microsoft.Network/virtualWans
+ location:
+ description:
+ - Resource location.
+ returned: always
+ type: str
+ sample: eastus
+ tags:
+ description:
+ - Resource tags.
+ returned: always
+ type: dict
+ sample: { 'key1': 'value1'}
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ returned: always
+ type: str
+ sample: "86df6f3d-19f2-4cc8-8574-47921de4a6f1"
+ disable_vpn_encryption:
+ description:
+ - Vpn encryption to be disabled or not.
+ returned: always
+ type: bool
+ sample: false
+ virtual_hubs:
+ description:
+ - List of VirtualHubs in the VirtualWAN.
+ type: complex
+ contains:
+ id:
+ description:
+ - The virtual hubs list of the virtual wan.
+ returned: always
+ type: str
+ sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualHubs/test
+ vpn_sites:
+ description:
+ - List of VpnSites in the VirtualWAN.
+ returned: always + type: complex + contains: + id: + description: + - The vpn site resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/vpnSites/test1 + allow_branch_to_branch_traffic: + description: + - True if branch to branch traffic is allowed. + returned: always + type: bool + sample: true + allow_vnet_to_vnet_traffic: + description: + - True if Vnet to Vnet traffic is allowed. + returned: always + type: bool + sample: True + office365_local_breakout_category: + description: + - The office local breakout category. + returned: always + type: str + sample: None + provisioning_state: + description: + - The provisioning state of the virtual WAN resource. + returned: always + type: str + sample: Succeeded + virtual_wan_type: + description: + - The type of virtual wan. + returned: always + type: str + sample: Standard +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase +try: + from azure.core.exceptions import ResourceNotFoundError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMVirtualWanInfo(AzureRMModuleBase): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str' + ), + name=dict( + type='str' + ) + ) + + self.resource_group = None + self.name = None + + self.results = dict(changed=False) + + super(AzureRMVirtualWanInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if (self.resource_group is not None and self.name is not None): + self.results['virtual_wans'] = self.format_item(self.get()) + elif (self.resource_group is not None): + self.results['virtual_wans'] = self.format_item(self.list_by_resource_group()) + else: + self.results['virtual_wans'] = self.format_item(self.list()) + return 
self.results + + def get(self): + response = None + + try: + response = self.network_client.virtual_wans.get(resource_group_name=self.resource_group, + virtual_wan_name=self.name) + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list_by_resource_group(self): + response = None + + try: + response = self.network_client.virtual_wans.list_by_resource_group(resource_group_name=self.resource_group) + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def list(self): + response = None + + try: + response = self.network_client.virtual_wans.list() + except ResourceNotFoundError as e: + self.log('Could not get info for @(Model.ModuleOperationNameUpper).') + + return response + + def format_item(self, item): + if hasattr(item, 'as_dict'): + return [item.as_dict()] + else: + result = [] + items = list(item) + for tmp in items: + result.append(tmp.as_dict()) + return result + + +def main(): + AzureRMVirtualWanInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy.py new file mode 100644 index 000000000..3f6a6e862 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy.py @@ -0,0 +1,458 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Suyeb Ansari (@suyeb786) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_vmbackuppolicy +version_added: '1.1.0' +short_description: Create or Delete Azure VM Backup Policy +description: + - Create or Delete Azure VM Backup Policy. +options: + name: + description: + - Policy Name. 
+ required: true
+ type: str
+ resource_group:
+ description:
+ - The name of the resource group.
+ required: true
+ type: str
+ vault_name:
+ description:
+ - Recovery Service Vault Name.
+ required: true
+ type: str
+ time:
+ description:
+ - Retention times of retention policy in UTC.
+ required: false
+ default: '12:00'
+ type: str
+ weekdays:
+ description:
+ - List of days of the week.
+ required: false
+ default: ['Monday']
+ type: list
+ weeks:
+ description:
+ - List of weeks of month.
+ required: false
+ default: ['First']
+ type: list
+ months:
+ description:
+ - List of months of year of yearly retention policy.
+ required: false
+ default: ['January']
+ type: list
+ count:
+ description:
+ - Count of duration types. Retention duration is obtained by counting the duration type Count times.
+ required: false
+ default: 1
+ type: int
+ state:
+ description:
+ - Assert the state of the protection item.
+ - Use C(present) for Creating Backup Policy.
+ - Use C(absent) for Deleting Backup Policy.
+ default: present
+ type: str
+ choices:
+ - present
+ - absent
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+author:
+ - Suyeb Ansari (@suyeb786)
+'''
+
+EXAMPLES = '''
+- name: Create VM Backup Policy
+ azure_rm_vmbackuppolicy:
+ name: 'myBackupPolicy'
+ vault_name: 'myVault'
+ resource_group: 'myResourceGroup'
+ time: '18:00'
+ weekdays: ['Monday', 'Thursday', 'Friday']
+ weeks: ['First', 'Fourth']
+ months: ['February', 'November']
+ count: 4
+ state: present
+- name: Delete VM Backup Policy
+ azure_rm_vmbackuppolicy:
+ name: 'myBackupPolicy'
+ vault_name: 'myVault'
+ resource_group: 'myResourceGroup'
+ state: absent
+'''
+
+RETURN = '''
+response:
+ description:
+ - The response about the current state of the backup policy.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always + type: str + sample: "/subscriptions/xxxxxxx/resourceGroups/resourcegroup_name/ \ + providers/Microsoft.RecoveryServices/vaults/myVault/backupPolicies/myBackup" + name: + description: + - Backup Policy Name. + returned: always + type: str + sample: "myBackup" + properties: + description: + - The backup policy properties. + returned: always + type: dict + sample: { + "backupManagementType": "AzureIaasVM", + "schedulePolicy": { + "schedulePolicyType": "SimpleSchedulePolicy", + "scheduleRunFrequency": "Weekly", + "scheduleRunDays": [ + "Monday", + "Wednesday", + "Thursday" + ], + "scheduleRunTimes": [ + "2018-01-24T10:00:00Z" + ], + "scheduleWeeklyFrequency": 0 + }, + "retentionPolicy": { + "retentionPolicyType": "LongTermRetentionPolicy", + "weeklySchedule": { + "daysOfTheWeek": [ + "Monday", + "Wednesday", + "Thursday" + ], + "retentionTimes": [ + "2018-01-24T10:00:00Z" + ], + "retentionDuration": { + "count": 1, + "durationType": "Weeks" + } + }, + "monthlySchedule": { + "retentionScheduleFormatType": "Weekly", + "retentionScheduleWeekly": { + "daysOfTheWeek": [ + "Wednesday", + "Thursday" + ], + "weeksOfTheMonth": [ + "First", + "Third" + ] + }, + "retentionTimes": [ + "2018-01-24T10:00:00Z" + ], + "retentionDuration": { + "count": 2, + "durationType": "Months" + } + }, + "yearlySchedule": { + "retentionScheduleFormatType": "Weekly", + "monthsOfYear": [ + "February", + "November" + ], + "retentionScheduleWeekly": { + "daysOfTheWeek": [ + "Monday", + "Thursday" + ], + "weeksOfTheMonth": [ + "Fourth" + ] + }, + "retentionTimes": [ + "2018-01-24T10:00:00Z" + ], + "retentionDuration": { + "count": 4, + "durationType": "Years" + } + } + }, + "timeZone": "Pacific Standard Time", + "protectedItemsCount": 0 + } + type: + description: + - Resource type. 
+ returned: always + type: str + sample: "Microsoft.RecoveryServices/vaults/backupPolicies" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +import time +import json + + +class VMBackupPolicy(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + vault_name=dict( + type='str', + required=True + ), + time=dict( + type='str', + default='12:00' + ), + weekdays=dict( + type='list', + default=['Monday'] + ), + weeks=dict( + type='list', + default=['First'] + ), + months=dict( + type='list', + default=['January'] + ), + count=dict( + type='int', + default=1 + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = None + self.time = None + self.state = None + self.vault_name = None + self.count = None + self.weekdays = None + self.weeks = None + self.months = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.url = None + self.status_code = [200, 201, 202, 204] + + self.body = {} + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-05-13' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(VMBackupPolicy, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True + ) + + def get_url(self): + return '/subscriptions/' \ + + self.subscription_id \ + + '/resourceGroups/' \ + + self.resource_group \ + + '/providers/Microsoft.RecoveryServices' \ + + '/vaults' + '/' \ + + self.vault_name + '/' \ + + "backupPolicies/" \ + + self.name + + def set_schedule_run_time(self): + return time.strftime("%Y-%m-%d", time.gmtime()) + "T" + self.time 
+ ":00Z" + + def get_body(self): + self.log('backup attributes {0}'.format(self.body)) + self.time = self.set_schedule_run_time() + schedule_policy = dict() + schedule_policy['schedulePolicyType'] = 'SimpleSchedulePolicy' + schedule_policy['scheduleRunFrequency'] = 'Weekly' + schedule_policy['scheduleRunTimes'] = [self.time] + schedule_policy['scheduleRunDays'] = self.weekdays + + weekly_schedule = dict() + weekly_schedule['daysOfTheWeek'] = ['Monday'] + weekly_schedule['retentionTimes'] = [self.time] + weekly_schedule['retentionDuration'] = dict() + weekly_schedule['retentionDuration']['count'] = self.count + weekly_schedule['retentionDuration']['durationType'] = 'Weeks' + + monthly_schedule = dict() + monthly_schedule['retentionScheduleFormatType'] = 'Weekly' + monthly_schedule['retentionScheduleWeekly'] = dict() + monthly_schedule['retentionScheduleWeekly']['daysOfTheWeek'] = self.weekdays + monthly_schedule['retentionScheduleWeekly']['weeksOfTheMonth'] = self.weeks + monthly_schedule['retentionTimes'] = [self.time] + monthly_schedule['retentionDuration'] = dict() + monthly_schedule['retentionDuration']['count'] = self.count + monthly_schedule['retentionDuration']['durationType'] = 'Months' + + yearly_schedule = dict() + yearly_schedule['retentionScheduleFormatType'] = 'Weekly' + yearly_schedule['monthsOfYear'] = self.months + yearly_schedule['retentionScheduleWeekly'] = dict() + yearly_schedule['retentionScheduleWeekly']['daysOfTheWeek'] = self.weekdays + yearly_schedule['retentionScheduleWeekly']['weeksOfTheMonth'] = self.weeks + yearly_schedule['retentionTimes'] = [self.time] + yearly_schedule['retentionDuration'] = dict() + yearly_schedule['retentionDuration']['count'] = self.count + yearly_schedule['retentionDuration']['durationType'] = 'Years' + + body = dict() + body['properties'] = dict() + body['properties']['backupManagementType'] = 'AzureIaasVM' + body['properties']['timeZone'] = 'Pacific Standard Time' + body['properties']['schedulePolicy'] = 
schedule_policy + body['properties']['retentionPolicy'] = dict() + body['properties']['retentionPolicy']['retentionPolicyType'] = 'LongTermRetentionPolicy' + body['properties']['retentionPolicy']['weeklySchedule'] = weekly_schedule + body['properties']['retentionPolicy']['monthlySchedule'] = monthly_schedule + body['properties']['retentionPolicy']['yearlySchedule'] = yearly_schedule + return body + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + self.url = self.get_url() + self.body = self.get_body() + old_response = None + response = None + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + old_response = self.get_resource() + + changed = False + if self.state == 'present': + if old_response is False: + response = self.create_vm_backup_policy() + changed = True + else: + response = old_response + if self.state == 'absent': + changed = True + response = self.delete_vm_backup_policy() + self.results['response'] = response + self.results['changed'] = changed + + return self.results + + def create_vm_backup_policy(self): + # self.log('Creating VM Backup Policy {0}'.format(self.)) + try: + response = self.mgmt_client.query( + self.url, + 'PUT', + self.query_parameters, + self.header_parameters, + self.body, + self.status_code, + 600, + 30, + ) + except Exception as e: + self.log('Error in creating Backup Policy.') + self.fail('Error in creating Backup Policy {0}'.format(str(e))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + + return response + + def delete_vm_backup_policy(self): + # self.log('Deleting Backup Policy {0}'.format(self.)) + try: + response = self.mgmt_client.query( + self.url, + 'DELETE', + 
self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30, + ) + except Exception as e: + self.log('Error attempting to delete Azure Backup policy.') + self.fail('Error attempting to delete Azure Backup policy: {0}'.format(str(e))) + + try: + response = json.loads(response.text) + except Exception: + response = {'text': response.text} + return response + + def get_resource(self): + # self.log('Fetch Backup Policy Details {0}'.format(self.)) + found = False + try: + response = self.mgmt_client.query( + self.url, + 'GET', + self.query_parameters, + self.header_parameters, + None, + self.status_code, + 600, + 30, + ) + found = True + except Exception as e: + self.log('Backup policy does not exist.') + if found is True: + response = json.loads(response.text) + return response + else: + return False + + +def main(): + VMBackupPolicy() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy_info.py new file mode 100644 index 000000000..18aaf718c --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmbackuppolicy_info.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Suyeb Ansari (@suyeb786) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: azure_rm_vmbackuppolicy_info +version_added: '1.1.0' +short_description: Fetch Backup Policy Details +description: + - Get Backup Policy Details. +options: + name: + description: + - Policy Name. + required: true + type: str + resource_group: + description: + - The name of the resource group. + required: true + type: str + vault_name: + description: + - Recovery Service Vault Name. 
+ required: true
+ type: str
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+author:
+ - Suyeb Ansari (@suyeb786)
+'''
+
+EXAMPLES = '''
+ azure_rm_vmbackuppolicy_info:
+ name: 'myBackupPolicy'
+ vault_name: 'myVault'
+ resource_group: 'myResourceGroup'
+'''
+
+RETURN = '''
+response:
+ description:
+ - The response about the current state of the backup policy.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxx/resourceGroups/resourcegroup_name/ \
+ providers/Microsoft.RecoveryServices/vaults/myVault/backupPolicies/myBackup"
+ name:
+ description:
+ - Backup Policy Name.
+ returned: always
+ type: str
+ sample: "myBackup"
+ properties:
+ description:
+ - The backup policy properties.
+ returned: always
+ type: dict
+ sample: {
+ "backupManagementType": "AzureIaasVM",
+ "schedulePolicy": {
+ "schedulePolicyType": "SimpleSchedulePolicy",
+ "scheduleRunFrequency": "Weekly",
+ "scheduleRunDays": [
+ "Monday",
+ "Wednesday",
+ "Thursday"
+ ],
+ "scheduleRunTimes": [
+ "2018-01-24T10:00:00Z"
+ ],
+ "scheduleWeeklyFrequency": 0
+ },
+ "retentionPolicy": {
+ "retentionPolicyType": "LongTermRetentionPolicy",
+ "weeklySchedule": {
+ "daysOfTheWeek": [
+ "Monday",
+ "Wednesday",
+ "Thursday"
+ ],
+ "retentionTimes": [
+ "2018-01-24T10:00:00Z"
+ ],
+ "retentionDuration": {
+ "count": 1,
+ "durationType": "Weeks"
+ }
+ },
+ "monthlySchedule": {
+ "retentionScheduleFormatType": "Weekly",
+ "retentionScheduleWeekly": {
+ "daysOfTheWeek": [
+ "Wednesday",
+ "Thursday"
+ ],
+ "weeksOfTheMonth": [
+ "First",
+ "Third"
+ ]
+ },
+ "retentionTimes": [
+ "2018-01-24T10:00:00Z"
+ ],
+ "retentionDuration": {
+ "count": 2,
+ "durationType": "Months"
+ }
+ },
+ "yearlySchedule": {
+ "retentionScheduleFormatType": "Weekly",
+ "monthsOfYear": [
+ "February",
+ "November"
+ ],
+ "retentionScheduleWeekly": {
+ "daysOfTheWeek": [
+ "Monday",
+ 
"Thursday" + ], + "weeksOfTheMonth": [ + "Fourth" + ] + }, + "retentionTimes": [ + "2018-01-24T10:00:00Z" + ], + "retentionDuration": { + "count": 4, + "durationType": "Years" + } + } + }, + "timeZone": "Pacific Standard Time", + "protectedItemsCount": 0 + } + type: + description: + - Resource type. + returned: always + type: str + sample: "Microsoft.RecoveryServices/vaults/backupPolicies" +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_rest import GenericRestClient +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +import json + + +class BackupPolicyVMInfo(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + vault_name=dict( + type='str', + required=True + ) + ) + + self.resource_group = None + self.name = None + self.vault_name = None + + self.results = dict(changed=False) + self.mgmt_client = None + self.url = None + self.status_code = [200, 202] + + self.query_parameters = {} + self.query_parameters['api-version'] = '2019-05-13' + self.header_parameters = {} + self.header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + super(BackupPolicyVMInfo, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True + ) + + def get_url(self): + return '/subscriptions/' \ + + self.subscription_id \ + + '/resourceGroups/' \ + + self.resource_group \ + + '/providers/Microsoft.RecoveryServices' \ + + '/vaults' + '/' \ + + self.vault_name + '/' \ + + "backupPolicies/" \ + + self.name + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + + self.url = self.get_url() + + self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient, + base_url=self._cloud_environment.endpoints.resource_manager) + + 
response = self.get_resource()
+        changed = False
+        self.results['response'] = response
+        self.results['changed'] = changed
+
+        return self.results
+
+    def get_resource(self):
+        # self.log('Fetch Backup Policy Details {0}'.format(self.))
+        try:
+            response = self.mgmt_client.query(
+                self.url,
+                'GET',
+                self.query_parameters,
+                self.header_parameters,
+                None,
+                self.status_code,
+                600,
+                30,
+            )
+            found = True
+        except Exception as e:
+            self.log('Backup policy does not exist.')
+            self.fail('Error in fetching VM Backup Policy {0}'.format(str(e)))
+        try:
+            response = json.loads(response.text)
+        except Exception:
+            response = {'text': response.text}
+
+        return response
+
+
+def main():
+    BackupPolicyVMInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmssnetworkinterface_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmssnetworkinterface_info.py
new file mode 100644
index 000000000..e7f2b8474
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmssnetworkinterface_info.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2023 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: azure_rm_vmssnetworkinterface_info
+
+version_added: "1.15.0"
+
+short_description: Get information about network interface in virtual machine scale set
+
+description:
+    - Get information about network interface in virtual machine scale set.
+
+options:
+    name:
+        description:
+            - The name of the network interface.
+            - If configure I(name), you must set the parameters I(vm_index).
+        type: str
+    vmss_name:
+        description:
+            - The name of the virtual machine scale set.
+ type: str + required: True + vm_index: + description: + - The virtual machine index, such as I(vm_index=0). + type: str + resource_group: + description: + - Name of the resource group. + type: str + required: True + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - xuzhang3 (@xuzhang3) + - Fred-sun (@Fred-sun) + +''' + +EXAMPLES = ''' + - name: Get information by the network name + azure_rm_vmssnetworkinterface_info: + resource_group: myResourceGroup + name: nic001 + vmss_name: testVMSS + vm_index: 0 + + - name: Get all network interface information in virtual machine scale set + azure_rm_vmssnetworkinterface_info: + resource_group: myResourceGroup + vmss_name: testVMSS + + - name: Get all network interface information in the same virtual machine index. + azure_rm_vmssnetworkinterface_info: + resource_group: myResourceGroup + vmss_name: testVMSS + vm_index: 1 +''' + +RETURN = ''' +vmss_networkinterfaces: + description: + - List of network interface dicts. Each dict contains parameters can be passed to M(azure.azcollection.azure_rm_vmssnetworkinterface) module. + type: complex + returned: always + contains: + id: + description: + - Id of the network interface. + returned: always + type: str + sample: "/subscriptions/xxx-xxx/resourceGroups/RG/providers/Microsoft.Compute/virtualMachineScaleSets/fredvmss/virtualMachines/1/networkInterfaces/nic01" + resource_group: + description: + - Name of a resource group where the network interface exists. + returned: always + type: str + sample: RG + name: + description: + - Name of the network interface. + type: str + returned: always + sample: nic01 + location: + description: + - Azure location. + type: str + returned: always + sample: eastus + virtual_network: + description: + - An existing virtual network with which the network interface will be associated. + - It is a dict which contains I(name) and I(resource_group) of the virtual network. 
+ type: dict + returned: always + sample: {"name": "vnet01", "resource_group": "RG"} + subnet: + description: + - Name of an existing subnet within the specified virtual network. + type: str + returned: always + sample: default + tags: + description: + - Tags of the network interface. + type: dict + returned: always + sample: {"key1": "value1"} + ip_configurations: + description: + - List of IP configurations, if contains multiple configurations. + type: complex + returned: always + contains: + name: + description: + - Name of the IP configuration. + type: str + returned: always + sample: defaultIpConfiguration + private_ip_address: + description: + - Private IP address for the IP configuration. + type: str + returned: always + sample: 10.3.0.5 + private_ip_allocation_method: + description: + - Private IP allocation method. + returned: always + type: str + sample: Dynamic + public_ip_address: + description: + - Name of the public IP address. None for disable IP address. + returned: always + type: str + sample: null + public_ip_allocation_method: + description: + - Public IP allocation method. + returned: always + type: str + sample: null + load_balancer_backend_address_pools: + description: + - List of existing load-balancer backend address pools associated with the network interface. + returned: always + type: str + sample: null + application_gateway_backend_address_pools: + description: + - List of existing application gateway backend address pools associated with the network interface. + returned: always + type: str + sample: null + primary: + description: + - Whether the IP configuration is the primary one in the list. + returned: always + type: bool + sample: True + application_security_groups: + description: + - List of Application security groups. 
+                    returned: always
+                    type: str
+                    sample: /subscriptions//resourceGroups//providers/Microsoft.Network/applicationSecurityGroups/myASG
+        enable_accelerated_networking:
+            description:
+                - Specifies whether the network interface should be created with the accelerated networking feature or not.
+            type: bool
+            returned: always
+            sample: True
+        create_with_security_group:
+            description:
+                - Specifies whether a default security group should be created with the NIC. Only applies when creating a new NIC.
+            type: bool
+            returned: always
+            sample: True
+        security_group:
+            description:
+                - A security group resource ID with which to associate the network interface.
+            type: str
+            returned: always
+            sample: /subscriptions/xxx-xxx/resourceGroups/RG/providers/Microsoft.Network/networkSecurityGroups/nic01
+        enable_ip_forwarding:
+            description:
+                - Whether to enable IP forwarding
+            type: bool
+            returned: always
+            sample: True
+        dns_servers:
+            description:
+                - Which DNS servers should the NIC lookup.
+                - List of IP addresses.
+            type: list
+            returned: always
+            sample: []
+        mac_address:
+            description:
+                - The MAC address of the network interface.
+            type: str
+            returned: always
+            sample: 00-0D-3A-17-EC-36
+        provisioning_state:
+            description:
+                - The provisioning state of the network interface.
+            type: str
+            returned: always
+            sample: Succeeded
+        dns_settings:
+            description:
+                - The DNS settings in network interface.
+            type: complex
+            returned: always
+            contains:
+                dns_servers:
+                    description:
+                        - List of DNS servers IP addresses.
+                    returned: always
+                    type: list
+                    sample: []
+                applied_dns_servers:
+                    description:
+                        - If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers
+                          from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs.
+ returned: always + type: list + sample: [] + internal_dns_name_label: + description: + - Relative DNS name for this NIC used for internal communications between VMs in the same virtual network. + returned: always + type: str + sample: null + internal_fqdn: + description: + - Fully qualified DNS name supporting internal communications between VMs in the same virtual network. + returned: always + type: str + sample: null +''' # NOQA +try: + from azure.core.exceptions import ResourceNotFoundError +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict + + +AZURE_OBJECT_CLASS = 'VMSSNetworkInterface' + + +def nic_to_dict(nic): + ip_configurations = [ + dict( + name=config.name, + private_ip_address=config.private_ip_address, + private_ip_allocation_method=config.private_ip_allocation_method, + primary=config.primary if config.primary else False, + load_balancer_backend_address_pools=([item.id for item in config.load_balancer_backend_address_pools] + if config.load_balancer_backend_address_pools else None), + application_gateway_backend_address_pools=([item.id for item in config.application_gateway_backend_address_pools] + if config.application_gateway_backend_address_pools else None), + public_ip_address=config.public_ip_address.id if config.public_ip_address else None, + public_ip_allocation_method=config.public_ip_address.public_ip_allocation_method if config.public_ip_address else None, + application_security_groups=([asg.id for asg in config.application_security_groups] + if config.application_security_groups else None) + ) for config in nic.ip_configurations + ] + config = nic.ip_configurations[0] if len(nic.ip_configurations) > 0 else None + subnet_dict = azure_id_to_dict(config.subnet.id) if config and config.subnet else None + subnet = subnet_dict.get('subnets') if subnet_dict else None + virtual_network = dict( + 
resource_group=subnet_dict.get('resourceGroups'), + name=subnet_dict.get('virtualNetworks')) if subnet_dict else None + return dict( + id=nic.id, + resource_group=azure_id_to_dict(nic.id).get('resourceGroups'), + name=nic.name, + subnet=subnet, + virtual_network=virtual_network, + location=nic.location, + tags=nic.tags, + security_group=nic.network_security_group.id if nic.network_security_group else None, + dns_settings=dict( + dns_servers=nic.dns_settings.dns_servers, + applied_dns_servers=nic.dns_settings.applied_dns_servers, + internal_dns_name_label=nic.dns_settings.internal_dns_name_label, + internal_fqdn=nic.dns_settings.internal_fqdn + ), + ip_configurations=ip_configurations, + mac_address=nic.mac_address, + enable_ip_forwarding=nic.enable_ip_forwarding, + provisioning_state=nic.provisioning_state, + enable_accelerated_networking=nic.enable_accelerated_networking, + dns_servers=nic.dns_settings.dns_servers, + ) + + +class AzureRMVMSSNetworkInterfaceInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str', required=True), + vmss_name=dict(type='str', required=True), + vm_index=dict(type='str'), + ) + + self.results = dict( + changed=False, + ) + + self.name = None + self.resource_group = None + self.vmss_name = None + self.vm_index = None + + super(AzureRMVMSSNetworkInterfaceInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + results = [] + if self.name is not None: + if self.vm_index is not None: + results = self.get_item() + else: + self.fail("Parameter error: vm_index required when filtering by name.") + elif self.vm_index is not None: + results = self.list_vm_index() + else: + results = self.list_vmss() + + self.results['vmss_networkinterfaces'] = self.to_dict_list(results) + return self.results + + 
def get_item(self): + res = None + self.log("Get the specified network interface in a virtual machine scale set.") + try: + res = self.network_client.network_interfaces.get_virtual_machine_scale_set_network_interface(resource_group_name=self.resource_group, + virtual_machine_scale_set_name=self.vmss_name, + virtualmachine_index=self.vm_index, + network_interface_name=self.name) + except ResourceNotFoundError: + pass + + return [res] if res is not None else [] + + def list_vm_index(self): + try: + res = self.network_client.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(resource_group_name=self.resource_group, + virtual_machine_scale_set_name=self.vmss_name, + virtualmachine_index=self.vm_index) + return list(res) + except Exception as exc: + self.fail("Error listing by resource group {0} - {1}".format(self.resource_group, str(exc))) + + def list_vmss(self): + self.log('List all') + try: + response = self.network_client.network_interfaces.list_virtual_machine_scale_set_network_interfaces(resource_group_name=self.resource_group, + virtual_machine_scale_set_name=self.vmss_name) + return list(response) + except Exception as exc: + self.fail("Error listing all - {0}".format(str(exc))) + + def to_dict_list(self, raws): + return [nic_to_dict(item) for item in raws] if raws else [] + + +def main(): + AzureRMVMSSNetworkInterfaceInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite.py new file mode 100644 index 000000000..e72c5474e --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite.py @@ -0,0 +1,606 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Gu Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
+DOCUMENTATION = ''' +--- +module: azure_rm_vpnsite +version_added: '1.5.0' +short_description: Manage Azure VpnSite instance +description: + - Create, update and delete instance of Azure VpnSite. +options: + resource_group: + description: + - The resource group name of the VpnSite. + required: true + type: str + location: + description: + - The location of the VpnSite + type: str + name: + description: + - The name of the VpnSite. + required: true + type: str + virtual_wan: + description: + - The VirtualWAN to which the vpnSite belongs. + type: dict + suboptions: + id: + description: + - The resource ID of the related virtual wan. + type: str + device_properties: + description: + - The device properties. + type: dict + suboptions: + device_vendor: + description: + - Name of the device Vendor. + type: str + device_model: + description: + - Model of the device. + type: str + link_speed_in_mbps: + description: + - Link speed. + type: int + ip_address: + description: + - The ip-address for the vpn-site. + type: str + site_key: + description: + - The key for vpn-site that can be used for connections. + type: str + address_space: + description: + - The AddressSpace that contains an array of IP address ranges. + type: dict + suboptions: + address_prefixes: + description: + - A list of address blocks reserved for this virtual network in CIDR notation. + type: list + elements: str + bgp_properties: + description: + - The set of bgp properties. + type: dict + suboptions: + asn: + description: + - The BGP speaker's ASN. + type: int + bgp_peering_address: + description: + - The BGP peering address and BGP identifier of this BGP speaker. + type: str + peer_weight: + description: + - The weight added to routes learned from this BGP speaker. + type: int + bgp_peering_addresses: + description: + - BGP peering address with IP configuration ID for virtual network gateway. 
+ type: list + elements: dict + suboptions: + ipconfiguration_id: + description: + - The ID of IP configuration which belongs to gateway. + type: str + default_bgp_ip_addresses: + description: + - The list of default BGP peering addresses which belong to IP configuration. + type: list + elements: str + custom_bgp_ip_addresses: + description: + - The list of custom BGP peering addresses which belong to IP configuration. + type: list + elements: str + tunnel_ip_addresses: + description: + - The list of tunnel public IP addresses which belong to IP configuration. + type: list + elements: str + is_security_site: + description: + - IsSecuritySite flag. + type: bool + vpn_site_links: + description: + - List of all vpn site links. + type: list + elements: dict + suboptions: + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + type: str + link_properties: + description: + - The link provider properties. + type: dict + suboptions: + link_provider_name: + description: + - Name of the link provider. + type: str + link_speed_in_mbps: + description: + - Link speed. + type: int + ip_address: + description: + - The IP address for the vpn site link. + type: str + fqdn: + description: + - FQDN of vpn-site-link. + type: str + bgp_properties: + description: + - The set of bgp properties. + type: dict + suboptions: + asn: + description: + - The BGP speaker's ASN. + type: int + bgp_peering_address: + description: + - The BGP peering address and BGP identifier of this BGP speaker. + type: str + o365_policy: + description: + - Office365 Policy. + type: dict + suboptions: + break_out_categories: + description: + - Office365 breakout categories. + type: dict + suboptions: + allow: + description: + - Flag to control allow category. + type: bool + optimize: + description: + - Flag to control optimize category. + type: bool + default: + description: + - Flag to control default category. 
+ type: bool + state: + description: + - Assert the state of the VpnSite. + - Use C(present) to create or update an VpnSite and C(absent) to delete it. + default: present + type: str + choices: + - absent + - present +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + +''' + +EXAMPLES = ''' + - name: Create VpnSite + azure_rm_vpnsite: + resource_group: myResourceGroup + name: vpnSite_name + + - name: Delete Vpn Site + azure_rm_vpnsite: + resource_group: myResourceGroup + name: vpnSite_name + +''' + +RETURN = ''' +state: + description: + - Current state of the vpn site. + type: complex + returned: success + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/vpnSites/vpn_site_name + name: + description: + - Resource name. + returned: always + type: str + sample: vpn_site_name + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/vpnSites + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { 'key1': 'value1'} + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 8d7415fe-d92c-4331-92ea-460aadfb9648 + virtual_wan: + description: + - The VirtualWAN to which the vpnSite belongs. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/virtualWans/virtualwan_name + device_properties: + description: + - The device properties. + returned: always + type: complex + contains: + device_vendor: + description: + - Name of the device Vendor. 
+ returned: always + type: str + sample: {"link_speed_in_mbps": 0} + provisioning_state: + description: + - The provisioning state of the VPN site resource. + returned: always + type: str + sample: "Succeeded" + is_security_site: + description: + - IsSecuritySite flag. + returned: always + type: bool + sample: false +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt +try: + from msrestazure.azure_operation import AzureOperationPoller + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller +except ImportError: + # This is handled in azure_rm_common + pass + + +class Actions: + NoAction, Create, Update, Delete = range(4) + + +class AzureRMVpnSite(AzureRMModuleBaseExt): + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + virtual_wan=dict( + type='dict', + disposition='/virtual_wan', + options=dict( + id=dict( + type='str', + disposition='id' + ) + ) + ), + device_properties=dict( + type='dict', + disposition='/device_properties', + options=dict( + device_vendor=dict( + type='str', + disposition='device_vendor' + ), + device_model=dict( + type='str', + disposition='device_model' + ), + link_speed_in_mbps=dict( + type='int', + disposition='link_speed_in_mbps' + ) + ) + ), + ip_address=dict( + type='str', + disposition='/ip_address' + ), + site_key=dict( + type='str', + no_log=True, + disposition='/site_key' + ), + address_space=dict( + type='dict', + disposition='/address_space', + options=dict( + address_prefixes=dict( + type='list', + disposition='address_prefixes', + elements='str' + ) + ) + ), + bgp_properties=dict( + type='dict', + disposition='/bgp_properties', + options=dict( + asn=dict( + type='int', + disposition='asn' + ), + bgp_peering_address=dict( + type='str', + disposition='bgp_peering_address' + ), + 
peer_weight=dict( + type='int', + disposition='peer_weight' + ), + bgp_peering_addresses=dict( + type='list', + disposition='bgp_peering_addresses', + elements='dict', + options=dict( + ipconfiguration_id=dict( + type='str', + disposition='ipconfiguration_id' + ), + default_bgp_ip_addresses=dict( + type='list', + updatable=False, + disposition='default_bgp_ip_addresses', + elements='str' + ), + custom_bgp_ip_addresses=dict( + type='list', + disposition='custom_bgp_ip_addresses', + elements='str' + ), + tunnel_ip_addresses=dict( + type='list', + updatable=False, + disposition='tunnel_ip_addresses', + elements='str' + ) + ) + ) + ) + ), + is_security_site=dict( + type='bool', + disposition='/is_security_site' + ), + vpn_site_links=dict( + type='list', + disposition='/vpn_site_links', + elements='dict', + options=dict( + name=dict( + type='str', + disposition='name' + ), + link_properties=dict( + type='dict', + disposition='link_properties', + options=dict( + link_provider_name=dict( + type='str', + disposition='link_provider_name' + ), + link_speed_in_mbps=dict( + type='int', + disposition='link_speed_in_mbps' + ) + ) + ), + ip_address=dict( + type='str', + disposition='ip_address' + ), + fqdn=dict( + type='str', + disposition='fqdn' + ), + bgp_properties=dict( + type='dict', + disposition='bgp_properties', + options=dict( + asn=dict( + type='int', + disposition='asn' + ), + bgp_peering_address=dict( + type='str', + disposition='bgp_peering_address' + ) + ) + ) + ) + ), + o365_policy=dict( + type='dict', + disposition='/o365_policy', + options=dict( + break_out_categories=dict( + type='dict', + disposition='break_out_categories', + options=dict( + allow=dict( + type='bool', + disposition='allow' + ), + optimize=dict( + type='bool', + disposition='optimize' + ), + default=dict( + type='bool', + disposition='default' + ) + ) + ) + ) + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + self.resource_group = None + self.name = 
None + self.location = None + self.body = {} + + self.results = dict(changed=False) + self.state = None + self.to_do = Actions.NoAction + + super(AzureRMVpnSite, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + for key in list(self.module_arg_spec.keys()): + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + self.body[key] = kwargs[key] + + self.inflate_parameters(self.module_arg_spec, self.body, 0) + + resource_group = self.get_resource_group(self.resource_group) + if self.location is None: + # Set default location + self.location = resource_group.location + self.body['location'] = self.location + + old_response = None + response = None + + old_response = self.get_resource() + + if not old_response: + if self.state == 'present': + self.to_do = Actions.Create + else: + if self.state == 'absent': + self.to_do = Actions.Delete + else: + modifiers = {} + self.create_compare_modifiers(self.module_arg_spec, '', modifiers) + self.results['modifiers'] = modifiers + self.results['compare'] = [] + if not self.default_compare(modifiers, self.body, old_response, '', self.results): + self.to_do = Actions.Update + + if (self.to_do == Actions.Create) or (self.to_do == Actions.Update): + self.results['changed'] = True + if self.check_mode: + return self.results + response = self.create_update_resource() + elif self.to_do == Actions.Delete: + self.results['changed'] = True + if self.check_mode: + return self.results + self.delete_resource() + else: + self.results['changed'] = False + response = old_response + + if response is not None: + self.results['state'] = response + return self.results + + def create_update_resource(self): + try: + response = self.network_client.vpn_sites.begin_create_or_update(resource_group_name=self.resource_group, + vpn_site_name=self.name, + vpn_site_parameters=self.body) + if isinstance(response, AzureOperationPoller) or 
isinstance(response, LROPoller): + response = self.get_poller_result(response) + except Exception as exc: + self.log('Error attempting to create the VpnSite instance.') + self.fail('Error creating the VpnSite instance: {0}'.format(str(exc))) + return response.as_dict() + + def delete_resource(self): + try: + response = self.network_client.vpn_sites.begin_delete(resource_group_name=self.resource_group, + vpn_site_name=self.name) + except Exception as e: + self.log('Error attempting to delete the VpnSite instance.') + self.fail('Error deleting the VpnSite instance: {0}'.format(str(e))) + + return True + + def get_resource(self): + try: + response = self.network_client.vpn_sites.get(resource_group_name=self.resource_group, + vpn_site_name=self.name) + except ResourceNotFoundError as e: + return False + return response.as_dict() + + +def main(): + AzureRMVpnSite() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite_info.py new file mode 100644 index 000000000..0799959ab --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vpnsite_info.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# +# Copyright (c) 2020 Fred-Sun, (@Fred-Sun) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_vpnsite_info +version_added: '1.5.0' +short_description: Get VpnSite info +description: + - Get info of VpnSite. +options: + resource_group: + description: + - The resource group name of the VpnSite. + type: str + name: + description: + - The name of the VpnSite being retrieved. 
+ type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + +''' + +EXAMPLES = ''' + - name: Get Vpn Site Info by name + azure_rm_vpnsite_info: + resource_group: myResourceGroup + name: vwan_site_name + + - name: Get Vpn Site List By ResourceGroup + azure_rm_vpnsite_info: + resource_group: myResourceGroup + + - name: Get Vpn Site List By Subscription + azure_rm_vpnsite_info: + +''' + +RETURN = ''' +vpn_sites: + description: + - A list of dict results where the key is the name of the VpnSite and the values are the facts for that VpnSite. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/vpnSites/vwam_site_name + name: + description: + - Resource name. + returned: always + type: str + sample: vwan_site_name + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/vpnSites + location: + description: + - Resource location. + returned: always + type: str + sample: eastus + tags: + description: + - Resource tags. + returned: always + type: dict + sample: { "key1":"value1"} + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 1d8c0731-adc6-4022-9c70-3c389cd73e2a + virtual_wan: + description: + - The VirtualWAN to which the vpnSite belongs. + returned: always + type: dict + sample: {"id": "/subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualWans/vwan_name"} + device_properties: + description: + - The device properties. + returned: always + type: dict + sample: {"device_vendor": "myVM", "link_speed_in_mbps": 0} + address_space: + description: + - The AddressSpace that contains an array of IP address ranges. 
+ returned: always + type: dict + sample: {"address_prefixes": ["10.0.0.0/24",]} + provisioning_state: + description: + - The provisioning state of the VPN site resource. + returned: always + type: str + sample: "succeeded" + is_security_site: + description: + - IsSecuritySite flag. + returned: always + type: bool + sample: false + vpn_site_links: + description: + - List of all vpn site links. + returned: always + type: complex + contains: + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + returned: always + type: str + sample: azureuser + link_properties: + description: + - The link provider properties. + returned: always + type: dict + sample: {"link_provider_name": "azureuser", "link_speed_in_mbps": 100} + ip_address: + description: + - The ip-address for the vpn-site-link. + returned: always + type: str + sample: 192.168.33.223 + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 1d8c0731-adc6-4022-9c70-3c389cd73e2a + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/vpnSites/vwam_site_name + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/vpnSites + o365_policy: + description: + - Office365 Policy. 
class AzureRMVpnSiteInfo(AzureRMModuleBase):
    """Gather facts about Azure VPN sites.

    Scope is chosen from the supplied arguments: a single site when both
    resource_group and name are given, all sites of a resource group when
    only resource_group is given, otherwise all sites in the subscription.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            )
        )

        self.resource_group = None
        self.name = None

        self.results = dict(changed=False)

        super(AzureRMVpnSiteInfo, self).__init__(self.module_arg_spec,
                                                 supports_check_mode=True,
                                                 supports_tags=True)

    def exec_module(self, **kwargs):
        """Dispatch to the narrowest lookup the supplied arguments allow and return results."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.resource_group is not None and self.name is not None:
            self.results['vpn_sites'] = self.format_item(self.get())
        elif self.resource_group is not None:
            self.results['vpn_sites'] = self.format_item(self.list_by_resource_group())
        else:
            self.results['vpn_sites'] = self.format_item(self.list())
        return self.results

    def get(self):
        """Return a single VPN site model, or None when it does not exist."""
        response = None
        try:
            response = self.network_client.vpn_sites.get(resource_group_name=self.resource_group,
                                                         vpn_site_name=self.name)
        except ResourceNotFoundError:
            # Fixed: the original log text was an unrendered code-generator
            # placeholder ("@(Model.ModuleOperationNameUpper)").
            self.log('VPN site {0} not found in resource group {1}.'.format(self.name, self.resource_group))
        return response

    def list_by_resource_group(self):
        """Return an iterable of all VPN sites in the resource group, or None on lookup failure."""
        response = None
        try:
            response = self.network_client.vpn_sites.list_by_resource_group(resource_group_name=self.resource_group)
        except ResourceNotFoundError:
            self.log('Could not list VPN sites in resource group {0}.'.format(self.resource_group))
        return response

    def list(self):
        """Return an iterable of all VPN sites in the subscription, or None on lookup failure."""
        response = None
        try:
            response = self.network_client.vpn_sites.list()
        except ResourceNotFoundError:
            self.log('Could not list VPN sites in the subscription.')
        return response

    def format_item(self, item):
        """Normalize a single model or an iterable of models into a list of dicts.

        Returns None when the lookup produced nothing.
        """
        if item is None:
            return None
        if hasattr(item, 'as_dict'):
            return [item.as_dict()]
        return [entry.as_dict() for entry in item]


def main():
    AzureRMVpnSiteInfo()


if __name__ == '__main__':
    main()
+ type: str +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags +author: + - Fred-Sun (@Fred-Sun) + - Haiyuan Zhang (@haiyuazhang) + +''' + +EXAMPLES = ''' + - name: Get Vpn Site Link info by the name + azure_rm_vpnsitelink_info: + resource_group: myResourceGroup + name: vpnSiteLink1 + vpn_site_name: vpnSite1 + + + - name: Get Vpn Site Links by the Vpn Site + azure_rm_vpnsitelink_info: + resource_group: myResourceGroup + vpn_site_name: vpnSite1 +''' + +RETURN = ''' +vpn_site_links: + description: + - A list of dict results where the key is the name of the VpnSiteLink and the values are the facts for that VpnSiteLink. + returned: always + type: complex + contains: + id: + description: + - Resource ID. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/vpnSites/fred/vpnSiteLinks/azureuser + etag: + description: + - A unique read-only string that changes whenever the resource is updated. + returned: always + type: str + sample: 1ec5c61b-d66f-4b1c-b7b5-f27d0a9ad9d3 + name: + description: + - The name of the resource that is unique within a resource group. + - This name can be used to access the resource. + returned: always + type: str + sample: azureuser + type: + description: + - Resource type. + returned: always + type: str + sample: Microsoft.Network/vpnSites/vpnSiteLinks + link_properties: + description: + - The link provider properties. + returned: always + type: complex + contains: + link_provider_name: + description: + - Name of the link provider. + returned: always + type: str + sample: azureuser + link_speed_in_mbps: + description: + - Link speed. + returned: always + type: int + sample: 100 + ip_address: + description: + - The ip-address for the vpn-site-link. + returned: always + type: str + sample: 192.168.33.223 + provisioning_state: + description: + - The provisioning state of the VPN site link resource. 
class AzureRMVpnSiteLinkInfo(AzureRMModuleBase):
    """Gather facts about Azure VPN site links.

    Returns a single link when name is supplied, otherwise all links of the
    given VPN site. resource_group and vpn_site_name are required arguments.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            vpn_site_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )

        self.resource_group = None
        self.vpn_site_name = None
        # Fixed: the original assigned a discarded local ("name = None")
        # instead of initializing the instance attribute.
        self.name = None

        self.results = dict(changed=False)

        super(AzureRMVpnSiteLinkInfo, self).__init__(self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=True)

    def exec_module(self, **kwargs):
        """Run the lookup appropriate to the supplied arguments and return results."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # resource_group and vpn_site_name are required, so one of the two
        # branches always runs.
        if self.resource_group is not None and self.vpn_site_name is not None and self.name is not None:
            self.results['vpn_site_links'] = self.format_item(self.get())
        elif self.resource_group is not None and self.vpn_site_name is not None:
            self.results['vpn_site_links'] = self.format_item(self.list_by_vpn_site())
        return self.results

    def get(self):
        """Return a single VPN site link model, or None when it does not exist."""
        response = None
        try:
            response = self.network_client.vpn_site_links.get(resource_group_name=self.resource_group,
                                                              vpn_site_name=self.vpn_site_name,
                                                              vpn_site_link_name=self.name)
        except ResourceNotFoundError:
            # Fixed: the original log text was an unrendered code-generator
            # placeholder ("@(Model.ModuleOperationNameUpper)").
            self.log('VPN site link {0} not found on site {1}.'.format(self.name, self.vpn_site_name))
        return response

    def list_by_vpn_site(self):
        """Return an iterable of all links of the VPN site, or None on lookup failure."""
        response = None
        try:
            response = self.network_client.vpn_site_links.list_by_vpn_site(resource_group_name=self.resource_group,
                                                                           vpn_site_name=self.vpn_site_name)
        except ResourceNotFoundError:
            self.log('Could not list links of VPN site {0}.'.format(self.vpn_site_name))
        return response

    def format_item(self, item):
        """Normalize a single model or an iterable of models into a list of dicts.

        Returns None when the lookup produced nothing.
        """
        if item is None:
            return None
        if hasattr(item, 'as_dict'):
            return [item.as_dict()]
        return [entry.as_dict() for entry in item]


def main():
    AzureRMVpnSiteLinkInfo()


if __name__ == '__main__':
    main()
+ - C(is_linux), whether or not the app service plan is Linux. defaults to C(False). + - C(number_of_workers), number of workers for app service plan. + + frameworks: + description: + - Set of run time framework settings. Each setting is a dictionary. + - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info. + suboptions: + name: + description: + - Name of the framework. + - Supported framework list for Windows web app and Linux web app is different. + - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018. + - Windows web apps support multiple framework at the same time. + - Linux web apps support C(java), C(ruby), C(php), C(python), C(dotnetcore), and C(node) from June 2018. + - Linux web apps support only one framework. + - Java framework is mutually exclusive with others. + choices: + - java + - net_framework + - php + - python + - ruby + - dotnetcore + - node + version: + description: + - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info. + - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5. + - C(php) supported value sample, C(5.5), C(5.6), C(7.0). + - C(python) supported value sample, C(2.7), C(3.8), C(3.10). + - C(node) supported value sample, C(6.6), C(6.9). + - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2). + - C(ruby) supported value sample, C(2.3). + - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app. + settings: + description: + - List of settings of the framework. + suboptions: + java_container: + description: + - Name of Java container. + - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty). + java_container_version: + description: + - Version of Java container. + - Supported only when I(frameworks=java). + - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty,), C(9.1), C(9.3). 
+ + container_settings: + description: + - Web app container settings. + suboptions: + name: + description: + - Name of the container, for example C(imagename:tag). + - To create a multi-container app, the name should be 'COMPOSE|' or 'KUBE|' followed by base64 encoded configuration. + registry_server_url: + description: + - Container registry server URL, for example C(mydockerregistry.io). + registry_server_user: + description: + - The container registry server user name. + registry_server_password: + description: + - The container registry server password. + + scm_type: + description: + - Repository type of deployment source, for example C(LocalGit), C(GitHub). + - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype). + + always_on: + description: + - Keeps the app loaded even when there's no traffic. + type: bool + + min_tls_version: + description: + - The minimum TLS encryption version required for the app. + type: str + choices: + - '1.0' + - '1.1' + - '1.2' + + ftps_state: + description: + - The state of the FTP/FTPS service. + type: str + choices: + - AllAllowed + - FtpsOnly + - Disabled + + deployment_source: + description: + - Deployment source for git. + suboptions: + url: + description: + - Repository url of deployment source. + + branch: + description: + - The branch name of the repository. + startup_file: + description: + - The web's startup file. + - Used only for Linux web apps. + + client_affinity_enabled: + description: + - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance. + type: bool + default: True + + https_only: + description: + - Configures web site to accept only https requests. + type: bool + + app_settings: + description: + - Configure web app application settings. Suboptions are in key value pair format. + + purge_app_settings: + description: + - Purge any existing application settings. 
Replace web app application settings with app_settings. + type: bool + default: False + + app_state: + description: + - Start/Stop/Restart the web app. + type: str + choices: + - started + - stopped + - restarted + default: started + + state: + description: + - State of the Web App. + - Use C(present) to create or update a Web App and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yunge Zhu (@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a windows web app with non-exist app service plan + azure_rm_webapp: + resource_group: myResourceGroup + name: myWinWebapp + plan: + resource_group: myAppServicePlan_rg + name: myAppServicePlan + is_linux: false + sku: S1 + + - name: Create a docker web app with some app settings, with docker image + azure_rm_webapp: + resource_group: myResourceGroup + name: myDockerWebapp + plan: + resource_group: myAppServicePlan_rg + name: myAppServicePlan + is_linux: true + sku: S1 + number_of_workers: 2 + app_settings: + testkey: testvalue + testkey2: testvalue2 + container_settings: + name: ansible/ansible:ubuntu1404 + + - name: Create a docker web app with private acr registry + azure_rm_webapp: + resource_group: myResourceGroup + name: myDockerWebapp + plan: myAppServicePlan + app_settings: + testkey: testvalue + container_settings: + name: ansible/ubuntu1404 + registry_server_url: myregistry.io + registry_server_user: user + registry_server_password: pass + + - name: Create a multi-container web app + azure_rm_webapp: + resource_group: myResourceGroup + name: myMultiContainerWebapp + plan: myAppServicePlan + app_settings: + testkey: testvalue + container_settings: + name: "COMPOSE|{{ lookup('file', 'docker-compose.yml') | b64encode }}" + + - name: Create a linux web app with Node 6.6 framework + azure_rm_webapp: + resource_group: myResourceGroup + name: myLinuxWebapp + plan: + resource_group: 
myAppServicePlan_rg + name: myAppServicePlan + app_settings: + testkey: testvalue + frameworks: + - name: "node" + version: "6.6" + + - name: Create a windows web app with node, php + azure_rm_webapp: + resource_group: myResourceGroup + name: myWinWebapp + plan: + resource_group: myAppServicePlan_rg + name: myAppServicePlan + app_settings: + testkey: testvalue + frameworks: + - name: "node" + version: 6.6 + - name: "php" + version: "7.0" + + - name: Create a stage deployment slot for an existing web app + azure_rm_webapp: + resource_group: myResourceGroup + name: myWebapp/slots/stage + plan: + resource_group: myAppServicePlan_rg + name: myAppServicePlan + app_settings: + testkey:testvalue + + - name: Create a linux web app with java framework + azure_rm_webapp: + resource_group: myResourceGroup + name: myLinuxWebapp + plan: + resource_group: myAppServicePlan_rg + name: myAppServicePlan + app_settings: + testkey: testvalue + frameworks: + - name: "java" + version: "8" + settings: + java_container: "Tomcat" + java_container_version: "8.5" + + - name: Create a linux web app with python framework + azure_rm_webapp: + resource_group: myResourceGroup + name: myLinuxWebapp + plan: + resource_group: myAppServicePlan_rg + name: myAppServicePlan + app_settings: + testkey: testvalue + frameworks: + - name: "python" + version: "3.10" +''' + +RETURN = ''' +id: + description: + - ID of current web app. 
+ returned: always + type: str + sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp" +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from azure.mgmt.web.models import Site, AppServicePlan, SkuDescription, NameValuePair, SiteSourceControl, StringDictionary +except ImportError: + # This is handled in azure_rm_common + pass + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + +deployment_source_spec = dict( + url=dict(type='str'), + branch=dict(type='str') +) + + +framework_settings_spec = dict( + java_container=dict(type='str', required=True), + java_container_version=dict(type='str', required=True) +) + + +framework_spec = dict( + name=dict( + type='str', + required=True, + choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']), + version=dict(type='str', required=True), + settings=dict(type='dict', options=framework_settings_spec) +) + + +def _normalize_sku(sku): + if sku is None: + return sku + + sku = sku.upper() + if sku == 'FREE': + return 'F1' + elif sku == 'SHARED': + return 'D1' + return sku + + +def get_sku_name(tier): + tier = tier.upper() + if tier == 'F1' or tier == "FREE": + return 'FREE' + elif tier == 'D1' or tier == "SHARED": + return 'SHARED' + elif tier in ['B1', 'B2', 'B3', 'BASIC']: + return 'BASIC' + elif tier in ['S1', 'S2', 'S3']: + return 'STANDARD' + elif tier in ['P1', 'P2', 'P3']: + return 'PREMIUM' + elif tier in ['P1V2', 'P2V2', 'P3V2']: + return 'PREMIUMV2' + else: + return None + + +def appserviceplan_to_dict(plan): + return dict( + id=plan.id, + name=plan.name, + kind=plan.kind, + 
location=plan.location, + reserved=plan.reserved, + is_linux=plan.reserved, + provisioning_state=plan.provisioning_state, + tags=plan.tags if plan.tags else None + ) + + +def webapp_to_dict(webapp): + return dict( + id=webapp.id, + name=webapp.name, + location=webapp.location, + client_cert_enabled=webapp.client_cert_enabled, + enabled=webapp.enabled, + reserved=webapp.reserved, + client_affinity_enabled=webapp.client_affinity_enabled, + server_farm_id=webapp.server_farm_id, + host_names_disabled=webapp.host_names_disabled, + https_only=webapp.https_only if hasattr(webapp, 'https_only') else None, + skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None, + ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None, + state=webapp.state, + tags=webapp.tags if webapp.tags else None + ) + + +class Actions: + CreateOrUpdate, UpdateAppSettings, Delete = range(3) + + +class AzureRMWebApps(AzureRMModuleBase): + """Configuration class for an Azure RM Web App resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + plan=dict( + type='raw' + ), + frameworks=dict( + type='list', + elements='dict', + options=framework_spec + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ), + scm_type=dict( + type='str', + ), + always_on=dict( + type='bool', + ), + min_tls_version=dict( + type='str', + choices=['1.0', '1.1', '1.2'], + ), + ftps_state=dict( + type='str', + choices=['AllAllowed', 'FtpsOnly', 'Disabled'], + ), + deployment_source=dict( + type='dict', + options=deployment_source_spec + ), + startup_file=dict( + type='str' + ), + client_affinity_enabled=dict( + type='bool', + default=True + ), + https_only=dict( + type='bool' + ), + app_settings=dict( + type='dict' + ), + 
purge_app_settings=dict( + type='bool', + default=False + ), + app_state=dict( + type='str', + choices=['started', 'stopped', 'restarted'], + default='started' + ), + state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + mutually_exclusive = [['container_settings', 'frameworks']] + + self.resource_group = None + self.name = None + self.location = None + + # update in create_or_update as parameters + self.client_affinity_enabled = True + self.https_only = None + + self.tags = None + + # site config, e.g app settings, ssl + self.site_config = dict() + self.app_settings = dict() + self.app_settings_strDic = None + + # app service plan + self.plan = None + + # siteSourceControl + self.deployment_source = dict() + + # site, used at level creation, or update. e.g windows/linux, client_affinity etc first level args + self.site = None + + # property for internal usage, not used for sdk + self.container_settings = None + + self.purge_app_settings = False + self.app_state = 'started' + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = [] + + self.frameworks = None + + # set site_config value from kwargs + self.site_config_updatable_properties = ["net_framework_version", + "java_version", + "php_version", + "python_version", + "scm_type", + "always_on", + "min_tls_version", + "ftps_state"] + + # updatable_properties + self.updatable_properties = ["client_affinity_enabled", + "https_only"] + + self.supported_linux_frameworks = ['ruby', 'php', 'python', 'dotnetcore', 'node', 'java'] + self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java'] + + super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + 
setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key in ["scm_type", "always_on", "min_tls_version", "ftps_state"]: + self.site_config[key] = kwargs[key] + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get existing web app + old_response = self.get_webapp() + + if old_response: + self.results['id'] = old_response['id'] + + if self.state == 'present': + if not self.plan and not old_response: + self.fail("Please specify plan for newly created web app.") + + if not self.plan: + self.plan = old_response['server_farm_id'] + + self.plan = self.parse_resource_to_dict(self.plan) + + # get app service plan + is_linux = False + old_plan = self.get_app_service_plan() + if old_plan: + is_linux = old_plan['reserved'] + else: + is_linux = self.plan['is_linux'] if 'is_linux' in self.plan else False + + if self.frameworks: + # java is mutually exclusive with other frameworks + if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks): + self.fail('Java is mutually exclusive with other frameworks.') + + if is_linux: + if len(self.frameworks) != 1: + self.fail('Can specify one framework only for Linux web app.') + + if self.frameworks[0]['name'] not in self.supported_linux_frameworks: + self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name'])) + + self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper() + + if self.frameworks[0]['name'] == 'java': + if self.frameworks[0]['version'] != '8': + self.fail("Linux web app only supports java 8.") + if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat': + self.fail("Linux web app only supports tomcat container.") + + if self.frameworks[0]['settings'] and 
self.frameworks[0]['settings']['java_container'].lower() == 'tomcat': + self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8' + else: + self.site_config['linux_fx_version'] = 'JAVA|8-jre8' + else: + for fx in self.frameworks: + if fx.get('name') not in self.supported_windows_frameworks: + self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name'))) + else: + self.site_config[fx.get('name') + '_version'] = fx.get('version') + + if 'settings' in fx and fx['settings'] is not None: + for key, value in fx['settings'].items(): + self.site_config[key] = value + + if not self.app_settings: + self.app_settings = dict() + + if self.container_settings: + linux_fx_version = 'DOCKER|' + + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + + linux_fx_version += self.container_settings['registry_server_url'] + '/' + + linux_fx_version += self.container_settings['name'] + + # Use given name as is if it starts with allowed values of multi-container application + if self.container_settings['name'].startswith('COMPOSE|') or self.container_settings['name'].startswith('KUBE|'): + linux_fx_version = self.container_settings['name'] + + self.site_config['linux_fx_version'] = linux_fx_version + + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user'] + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password'] + + # init site + self.site = Site(location=self.location, site_config=self.site_config) + + if self.https_only is not None: + self.site.https_only = self.https_only + + self.site.client_affinity_enabled = self.client_affinity_enabled + + # check if the web app already 
present in the resource group + if not old_response: + self.log("Web App instance doesn't exist") + + to_be_updated = True + self.to_do.append(Actions.CreateOrUpdate) + self.site.tags = self.tags + + # service plan is required for creation + if not self.plan: + self.fail("Please specify app service plan in plan parameter.") + + if not old_plan: + # no existing service plan, create one + if (not self.plan.get('name') or not self.plan.get('sku')): + self.fail('Please specify name, is_linux, sku in plan') + + if 'location' not in self.plan: + plan_resource_group = self.get_resource_group(self.plan['resource_group']) + self.plan['location'] = plan_resource_group.location + + old_plan = self.create_app_service_plan() + + self.site.server_farm_id = old_plan['id'] + + # if linux, setup startup_file + if old_plan['is_linux']: + if hasattr(self, 'startup_file'): + self.site_config['app_command_line'] = self.startup_file + + # set app setting + if self.app_settings: + app_settings = [] + for key in self.app_settings.keys(): + app_settings.append(NameValuePair(name=key, value=self.app_settings[key])) + + self.site_config['app_settings'] = app_settings + else: + # existing web app, do update + self.log("Web App instance already exists") + + self.log('Result: {0}'.format(old_response)) + + update_tags, self.site.tags = self.update_tags(old_response.get('tags', None)) + + if update_tags: + to_be_updated = True + + # check if root level property changed + if self.is_updatable_property_changed(old_response): + to_be_updated = True + self.to_do.append(Actions.CreateOrUpdate) + + # check if site_config changed + old_config = self.get_webapp_configuration() + + if self.is_site_config_changed(old_config): + to_be_updated = True + self.to_do.append(Actions.CreateOrUpdate) + + # check if linux_fx_version changed + if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''): + to_be_updated = True + self.to_do.append(Actions.CreateOrUpdate) + + self.app_settings_strDic 
= self.list_app_settings() + + # purge existing app_settings: + if self.purge_app_settings: + to_be_updated = True + self.app_settings_strDic = dict() + self.to_do.append(Actions.UpdateAppSettings) + + # check if app settings changed + if self.purge_app_settings or self.is_app_settings_changed(): + to_be_updated = True + self.to_do.append(Actions.UpdateAppSettings) + + if self.app_settings: + for key in self.app_settings.keys(): + self.app_settings_strDic[key] = self.app_settings[key] + + elif self.state == 'absent': + if old_response: + self.log("Delete Web App instance") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_webapp() + + self.log('Web App instance deleted') + + else: + self.log("Web app {0} not exists.".format(self.name)) + + if to_be_updated: + self.log('Need to Create/Update web app') + self.results['changed'] = True + + if self.check_mode: + return self.results + + if Actions.CreateOrUpdate in self.to_do: + response = self.create_update_webapp() + + self.results['id'] = response['id'] + + if Actions.UpdateAppSettings in self.to_do: + update_response = self.update_app_settings() + self.results['id'] = update_response.id + + webapp = None + if old_response: + webapp = old_response + if response: + webapp = response + + if webapp: + if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \ + (webapp['state'] != 'Running' and self.app_state == 'started') or \ + self.app_state == 'restarted': + + self.results['changed'] = True + if self.check_mode: + return self.results + + self.set_webapp_state(self.app_state) + + return self.results + + # compare existing web app with input, determine weather it's update operation + def is_updatable_property_changed(self, existing_webapp): + for property_name in self.updatable_properties: + if hasattr(self, property_name) and getattr(self, property_name) is not None and \ + getattr(self, property_name) != existing_webapp.get(property_name, None): + return True 
+ + return False + + # compare xxx_version + def is_site_config_changed(self, existing_config): + for updatable_property in self.site_config_updatable_properties: + if self.site_config.get(updatable_property): + if not getattr(existing_config, updatable_property) or \ + str(getattr(existing_config, updatable_property)).upper() != str(self.site_config.get(updatable_property)).upper(): + return True + + return False + + # comparing existing app setting with input, determine whether it's changed + def is_app_settings_changed(self): + if self.app_settings: + if self.app_settings_strDic: + for key in self.app_settings.keys(): + if self.app_settings[key] != self.app_settings_strDic.get(key, None): + return True + else: + return True + return False + + # comparing deployment source with input, determine wheather it's changed + def is_deployment_source_changed(self, existing_webapp): + if self.deployment_source: + if self.deployment_source.get('url') \ + and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']: + return True + + if self.deployment_source.get('branch') \ + and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']: + return True + + return False + + def create_update_webapp(self): + ''' + Creates or updates Web App with the specified configuration. 
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Creating / Updating the Web App instance {0}".format(self.name)) + + try: + response = self.web_client.web_apps.begin_create_or_update(resource_group_name=self.resource_group, + name=self.name, + site_envelope=self.site) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Web App instance.') + self.fail("Error creating the Web App instance: {0}".format(str(exc))) + return webapp_to_dict(response) + + def delete_webapp(self): + ''' + Deletes specified Web App instance in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Web App instance {0}".format(self.name)) + try: + self.web_client.web_apps.delete(resource_group_name=self.resource_group, name=self.name) + except Exception as e: + self.log('Error attempting to delete the Web App instance.') + self.fail("Error deleting the Web App instance: {0}".format(str(e))) + + return True + + def get_webapp(self): + ''' + Gets the properties of the specified Web App. 
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Checking if the Web App instance {0} is present".format(self.name)) + + response = None + + try: + response = self.web_client.web_apps.get(resource_group_name=self.resource_group, name=self.name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising error + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App instance : {0} found".format(response.name)) + return webapp_to_dict(response) + + except ResourceNotFoundError: + pass + + self.log("Didn't find web app {0} in resource group {1}".format(self.name, self.resource_group)) + + return False + + def get_app_service_plan(self): + ''' + Gets app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Get App Service Plan {0}".format(self.plan['name'])) + + try: + response = self.web_client.app_service_plans.get( + resource_group_name=self.plan['resource_group'], + name=self.plan['name']) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising error + if response is not None: + self.log("Response : {0}".format(response)) + self.log("App Service Plan : {0} found".format(response.name)) + + return appserviceplan_to_dict(response) + except ResourceNotFoundError: + pass + + self.log("Didn't find app service plan {0} in resource group {1}".format( + self.plan['name'], self.plan['resource_group'])) + + return False + + def create_app_service_plan(self): + ''' + Creates app service plan + :return: deserialized app service plan dictionary + ''' + self.log("Create App Service Plan {0}".format(self.plan['name'])) + + try: + # normalize sku + sku = _normalize_sku(self.plan['sku']) + + sku_def = SkuDescription(tier=get_sku_name( + sku), name=sku, capacity=(self.plan.get('number_of_workers', None))) + plan_def = AppServicePlan( + location=self.plan['location'], app_service_plan_name=self.plan['name'], 
sku=sku_def, reserved=(self.plan.get('is_linux', None))) + + poller = self.web_client.app_service_plans.begin_create_or_update( + resource_group_name=self.plan['resource_group'], name=self.plan['name'], app_service_plan=plan_def) + + if isinstance(poller, LROPoller): + response = self.get_poller_result(poller) + + self.log("Response : {0}".format(response)) + + return appserviceplan_to_dict(response) + except Exception as ex: + self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format( + self.plan['name'], self.plan['resource_group'], str(ex))) + + def list_app_settings(self): + ''' + List application settings + :return: deserialized list response + ''' + self.log("List application setting") + + try: + response = self.web_client.web_apps.list_application_settings(resource_group_name=self.resource_group, name=self.name) + self.log("Response : {0}".format(response)) + + return response.properties + except Exception as ex: + self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def update_app_settings(self): + ''' + Update application settings + :return: deserialized updating response + ''' + self.log("Update application setting") + + try: + settings = StringDictionary( + properties=self.app_settings_strDic + ) + response = self.web_client.web_apps.update_application_settings( + resource_group_name=self.resource_group, name=self.name, app_settings=settings) + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def create_or_update_source_control(self): + ''' + Update site source control + :return: deserialized updating response + ''' + self.log("Update site source control") + + if self.deployment_source is None: + return False + + 
self.deployment_source['is_manual_integration'] = False + self.deployment_source['is_mercurial'] = False + + try: + site_source_control = SiteSourceControl( + repo_url=self.deployment_source.get('url'), + branch=self.deployment_source.get('branch') + ) + response = self.web_client.web_apps.begin_create_or_update_source_control( + resource_group_name=self.resource_group, name=self.name, site_source_control=site_source_control) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except Exception: + self.fail("Failed to update site source control for web app {0} in resource group {1}".format( + self.name, self.resource_group)) + + def get_webapp_configuration(self): + ''' + Get web app configuration + :return: deserialized web app configuration response + ''' + self.log("Get web app configuration") + + try: + response = self.web_client.web_apps.get_configuration( + resource_group_name=self.resource_group, name=self.name) + self.log("Response : {0}".format(response)) + + return response + except ResourceNotFoundError as ex: + self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + return False + + def set_webapp_state(self, appstate): + ''' + Start/stop/restart web app + :return: deserialized updating response + ''' + try: + if appstate == 'started': + response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name) + elif appstate == 'stopped': + response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name) + elif appstate == 'restarted': + response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name) + else: + self.fail("Invalid web app state {0}".format(appstate)) + + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + request_id = ex.request_id if ex.request_id else '' + self.log("Failed to {0} web app {1} in 
resource group {2}, request_id {3} - {4}".format( + appstate, self.name, self.resource_group, request_id, str(ex))) + + +def main(): + """Main execution""" + AzureRMWebApps() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py new file mode 100644 index 000000000..36b3eb3d3 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py @@ -0,0 +1,516 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_webapp_info + +version_added: "0.1.2" + +short_description: Get Azure web app facts + +description: + - Get facts for a specific web app or all web app in a resource group, or all web app in current subscription. + +options: + name: + description: + - Only show results for a specific web app. + resource_group: + description: + - Limit results by resource group. + return_publish_profile: + description: + - Indicate whether to return publishing profile of the web app. + default: False + type: bool + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ type: list + elements: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Yunge Zhu (@yungezz) +''' + +EXAMPLES = ''' + - name: Get facts for web app by name + azure_rm_webapp_info: + resource_group: myResourceGroup + name: winwebapp1 + + - name: Get facts for web apps in resource group + azure_rm_webapp_info: + resource_group: myResourceGroup + + - name: Get facts for web apps with tags + azure_rm_webapp_info: + tags: + - testtag + - foo:bar +''' + +RETURN = ''' +webapps: + description: + - List of web apps. + returned: always + type: complex + contains: + id: + description: + - ID of the web app. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp + name: + description: + - Name of the web app. + returned: always + type: str + sample: winwebapp1 + resource_group: + description: + - Resource group of the web app. + returned: always + type: str + sample: myResourceGroup + location: + description: + - Location of the web app. + returned: always + type: str + sample: eastus + plan: + description: + - ID of app service plan used by the web app. + returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan + app_settings: + description: + - App settings of the application. Only returned when web app has app settings. + returned: always + type: dict + sample: { + "testkey": "testvalue", + "testkey2": "testvalue2" + } + frameworks: + description: + - Frameworks of the application. Only returned when web app has frameworks. 
+ returned: always + type: list + sample: [ + { + "name": "net_framework", + "version": "v4.0" + }, + { + "name": "java", + "settings": { + "java_container": "tomcat", + "java_container_version": "8.5" + }, + "version": "1.7" + }, + { + "name": "php", + "version": "5.6" + } + ] + always_on: + description: + - If the app is kept loaded even when there's no traffic. + returned: always + type: bool + sample: true + min_tls_version: + description: + - The minimum TLS encryption version required for the app. + returned: always + type: str + sample: 1.2 + ftps_state: + description: + - The state of the FTP/FTPS service. + returned: always + type: str + sample: FtpsOnly + availability_state: + description: + - Availability of this web app. + returned: always + type: str + sample: Normal + default_host_name: + description: + - Host name of the web app. + returned: always + type: str + sample: vxxisurg397winapp4.azurewebsites.net + enabled: + description: + - Indicates the web app enabled or not. + returned: always + type: bool + sample: true + enabled_host_names: + description: + - Enabled host names of the web app. + returned: always + type: list + sample: [ + "vxxisurg397winapp4.azurewebsites.net", + "vxxisurg397winapp4.scm.azurewebsites.net" + ] + host_name_ssl_states: + description: + - SSL state per host names of the web app. + returned: always + type: list + sample: [ + { + "hostType": "Standard", + "name": "vxxisurg397winapp4.azurewebsites.net", + "sslState": "Disabled" + }, + { + "hostType": "Repository", + "name": "vxxisurg397winapp4.scm.azurewebsites.net", + "sslState": "Disabled" + } + ] + host_names: + description: + - Host names of the web app. + returned: always + type: list + sample: [ + "vxxisurg397winapp4.azurewebsites.net" + ] + outbound_ip_addresses: + description: + - Outbound IP address of the web app. 
+ returned: always + type: str + sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45" + ftp_publish_url: + description: + - Publishing URL of the web app when deployment type is FTP. + returned: always + type: str + sample: ftp://xxxx.ftp.azurewebsites.windows.net + state: + description: + - State of the web app. + returned: always + type: str + sample: running + publishing_username: + description: + - Publishing profile user name. + returned: only when I(return_publish_profile=True). + type: str + sample: "$vxxisuRG397winapp4" + publishing_password: + description: + - Publishing profile password. + returned: only when I(return_publish_profile=True). + type: str + sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A" + tags: + description: + - Tags assigned to the resource. Dictionary of string:string pairs. + returned: always + type: dict + sample: { tag1: abc } +''' +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from azure.mgmt.web.models import CsmPublishingProfileOptions +except Exception: + # This is handled in azure_rm_common + pass + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase +try: + import xmltodict +except Exception: + pass + +AZURE_OBJECT_CLASS = 'WebApp' + + +class AzureRMWebAppInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list', elements='str'), + return_publish_profile=dict(type='bool', default=False), + ) + + self.results = dict( + changed=False, + webapps=[], + ) + + self.name = None + self.resource_group = None + self.tags = None + self.return_publish_profile = False + + self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby'] + + super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + 
supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + is_old_facts = self.module._name == 'azure_rm_webapp_facts' + if is_old_facts: + self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'", version=(2.9, )) + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name: + self.results['webapps'] = self.list_by_name() + elif self.resource_group: + self.results['webapps'] = self.list_by_resource_group() + else: + self.results['webapps'] = self.list_all() + + return self.results + + def list_by_name(self): + self.log('Get web app {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.web_client.web_apps.get(resource_group_name=self.resource_group, name=self.name) + except ResourceNotFoundError: + pass + + if item and self.has_tags(item.tags, self.tags): + curated_result = self.get_curated_webapp(self.resource_group, self.name, item) + result = [curated_result] + + return result + + def list_by_resource_group(self): + self.log('List web apps in resource groups {0}'.format(self.resource_group)) + try: + response = list(self.web_client.web_apps.list_by_resource_group(resource_group_name=self.resource_group)) + except Exception as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing web apps in resource groups {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + curated_output = self.get_curated_webapp(self.resource_group, item.name, item) + results.append(curated_output) + return results + + def list_all(self): + self.log('List web apps in current subscription') + try: + response = list(self.web_client.web_apps.list()) + except Exception as exc: + request_id = exc.request_id if exc.request_id else '' + self.fail("Error listing web apps, request id {0} - {1}".format(request_id, str(exc))) + + results = [] + for 
item in response: + if self.has_tags(item.tags, self.tags): + curated_output = self.get_curated_webapp(item.resource_group, item.name, item) + results.append(curated_output) + return results + + def list_webapp_configuration(self, resource_group, name): + self.log('Get web app {0} configuration'.format(name)) + + response = [] + + try: + response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name) + except Exception as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex))) + + return response.as_dict() + + def list_webapp_appsettings(self, resource_group, name): + self.log('Get web app {0} app settings'.format(name)) + + response = [] + + try: + response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name) + except Exception as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex))) + + return response.as_dict() + + def get_publish_credentials(self, resource_group, name): + self.log('Get web app {0} publish credentials'.format(name)) + try: + poller = self.web_client.web_apps.begin_list_publishing_credentials(resource_group_name=resource_group, name=name) + if isinstance(poller, LROPoller): + response = self.get_poller_result(poller) + except Exception as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail('Error getting web app {0} publishing credentials - {1}'.format(request_id, str(ex))) + return response + + def get_webapp_ftp_publish_url(self, resource_group, name): + + self.log('Get web app {0} app publish profile'.format(name)) + + url = None + try: + publishing_profile_options = CsmPublishingProfileOptions( + format="Ftp" + ) + content = 
self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, + name=name, + publishing_profile_options=publishing_profile_options) + if not content: + return url + + full_xml = '' + for f in content: + full_xml += f.decode() + profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile'] + + if not profiles: + return url + + for profile in profiles: + if profile['@publishMethod'] == 'FTP': + url = profile['@publishUrl'] + + except Exception as ex: + self.fail('Error getting web app {0} app settings - {1}'.format(name, str(ex))) + + return url + + def get_curated_webapp(self, resource_group, name, webapp): + pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS) + + try: + site_config = self.list_webapp_configuration(resource_group, name) + app_settings = self.list_webapp_appsettings(resource_group, name) + publish_cred = self.get_publish_credentials(resource_group, name) + ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name) + except Exception: + pass + return self.construct_curated_webapp(webapp=pip, + configuration=site_config, + app_settings=app_settings, + deployment_slot=None, + ftp_publish_url=ftp_publish_url, + publish_credentials=publish_cred) + + def construct_curated_webapp(self, + webapp, + configuration=None, + app_settings=None, + deployment_slot=None, + ftp_publish_url=None, + publish_credentials=None): + curated_output = dict() + curated_output['id'] = webapp['id'] + curated_output['name'] = webapp['name'] + curated_output['resource_group'] = webapp['properties']['resourceGroup'] + curated_output['location'] = webapp['location'] + curated_output['plan'] = webapp['properties']['serverFarmId'] + curated_output['tags'] = webapp.get('tags', None) + + # important properties from output. not match input arguments. 
+ curated_output['app_state'] = webapp['properties']['state'] + curated_output['availability_state'] = webapp['properties']['availabilityState'] + curated_output['default_host_name'] = webapp['properties']['defaultHostName'] + curated_output['host_names'] = webapp['properties']['hostNames'] + curated_output['enabled'] = webapp['properties']['enabled'] + curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames'] + curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates'] + curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses'] + + # curated site_config + if configuration: + curated_output['frameworks'] = [] + for fx_name in self.framework_names: + fx_version = configuration.get(fx_name + '_version', None) + if fx_version: + fx = { + 'name': fx_name, + 'version': fx_version + } + # java container setting + if fx_name == 'java': + if configuration['java_container'] and configuration['java_container_version']: + settings = { + 'java_container': configuration['java_container'].lower(), + 'java_container_version': configuration['java_container_version'] + } + fx['settings'] = settings + + curated_output['frameworks'].append(fx) + + # linux_fx_version + if configuration.get('linux_fx_version', None): + tmp = configuration.get('linux_fx_version').split("|") + if len(tmp) == 2: + curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]}) + + curated_output['always_on'] = configuration.get('always_on') + curated_output['ftps_state'] = configuration.get('ftps_state') + curated_output['min_tls_version'] = configuration.get('min_tls_version') + + # curated app_settings + if app_settings and app_settings.get('properties', None): + curated_output['app_settings'] = dict() + for item in app_settings['properties']: + curated_output['app_settings'][item] = app_settings['properties'][item] + + # curated deploymenet_slot + if deployment_slot: + curated_output['deployment_slot'] = 
deployment_slot + + # ftp_publish_url + if ftp_publish_url: + curated_output['ftp_publish_url'] = ftp_publish_url + + # curated publish credentials + if publish_credentials and self.return_publish_profile: + curated_output['publishing_username'] = publish_credentials.publishing_user_name + curated_output['publishing_password'] = publish_credentials.publishing_password + return curated_output + + +def main(): + AzureRMWebAppInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py new file mode 100644 index 000000000..d8b49a335 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappaccessrestriction +version_added: "1.8.0" +short_description: Manage web app network access restrictions +description: + - Add, remove, or update network access restrictions for a web app. +options: + name: + description: + - Name of the web app. + required: true + type: str + resource_group: + description: + - Resource group of the web app. + required: true + type: str + state: + description: + - State of the access restrictions. Use C(present) to create or update and C(absent) to delete. + type: str + default: present + choices: + - absent + - present + ip_security_restrictions: + description: + - The web app's HTTP access restrictions. + type: list + elements: dict + suboptions: + name: + description: + - Name of the access restriction. + type: str + description: + description: + - Description of the access restriction. 
+ type: str + action: + description: + - Traffic action for the access restriction. + type: str + default: Allow + choices: + - Allow + - Deny + priority: + description: + - Numerical priority of the access restriction. + type: int + required: true + ip_address: + description: + - IPv4 address (with subnet mask) of the access restriction. + type: str + required: true + scm_ip_security_restrictions: + description: + - >- + The web app's SCM access restrictions. If I(scm_ip_security_restrictions_use_main) is set to C(true), + the SCM restrictions will be configured but not used. + type: list + elements: dict + suboptions: + name: + description: + - Name of the access restriction. + type: str + description: + description: + - Description of the access restriction. + type: str + action: + description: + - Traffic action for the access restriction. + type: str + default: Allow + choices: + - Allow + - Deny + priority: + description: + - Numerical priority of the access restriction. + type: int + required: true + ip_address: + description: + - IPv4 address (with subnet mask) of the access restriction. + type: str + required: true + scm_ip_security_restrictions_use_main: + description: + - >- + Set to C(true) to have the HTTP access restrictions also apply to the SCM site. + If I(scm_ip_security_restrictions) are also applied, they will configured but not used. + default: false + type: bool + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' + - name: Configure web app access restrictions. + azure.azcollection.azure_rm_webappaccessrestriction: + name: "MyWebapp" + resource_group: "MyResourceGroup" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Allow" + ip_address: "1.1.1.1/24" + priority: 1 + - name: "Datacenter 2" + action: "Allow" + ip_address: "2.2.2.2/24" + priority: 2 + scm_ip_security_restrictions_use_main: true + + - name: Delete web app network access restrictions. 
+ azure.azcollection.azure_rm_webappaccessrestriction: + name: "MyWebapp" + resource_group: "MyResourceGroup" + state: "absent" +''' + +RETURN = ''' +ip_security_restrictions: + description: + - The web app's HTTP access restrictions. + returned: always + type: list + elements: dict + contains: + name: + description: + - Name of the access restriction. + returned: always + type: str + sample: my-access-restriction + description: + description: + - Description of the access restriction. + returned: always + type: str + sample: my-access-restriction-description + action: + description: + - Traffic action of the access restriction. + returned: always + type: str + sample: Allow + priority: + description: + - Numerical priority of the access restriction. + returned: always + type: int + sample: 1 + ip_address: + description: + - IP address of the access restriction. + returned: always + type: str + sample: 1.1.1.1/32 +scm_ip_security_restrictions: + description: + - The web app's SCM access restrictions. + returned: always + type: list + elements: dict + contains: + name: + description: + - Name of the access restriction. + returned: always + type: str + sample: my-access-restriction + description: + description: + - Description of the access restriction. + returned: always + type: str + sample: my-access-restriction-description + action: + description: + - Traffic action of the access restriction. + returned: always + type: str + sample: Allow + priority: + description: + - Numerical priority of the access restriction. + returned: always + type: int + sample: 1 + ip_address: + description: + - IP address of the access restriction. + returned: always + type: str + sample: 1.1.1.1/32 +scm_ip_security_restrictions_use_main: + description: + - Whether the HTTP access restrictions are used for SCM access. 
+ returned: always + type: bool + sample: false +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.web.models import IpSecurityRestriction +except Exception: + # This is handled in azure_rm_common + pass + +ip_restriction_spec = dict( + name=dict(type='str'), + description=dict(type='str'), + action=dict(type='str', default='Allow', choices=['Allow', 'Deny']), + priority=dict(type='int', required=True), + ip_address=dict(type='str', required=True), +) + + +class AzureRMWebAppAccessRestriction(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + ip_security_restrictions=dict(type='list', default=[], elements='dict', options=ip_restriction_spec), + scm_ip_security_restrictions=dict(type='list', default=[], elements='dict', options=ip_restriction_spec), + scm_ip_security_restrictions_use_main=dict(type='bool', default=False), + ) + + self.results = dict( + changed=False, + ip_security_restrictions=[], + scm_ip_security_restrictions=[], + scm_ip_security_restrictions_use_main=False, + ) + + self.state = None + self.name = None + self.resource_group = None + self.ip_security_restrictions = [] + self.scm_ip_security_restrictions = [] + self.scm_ip_security_restrictions_use_main = False + + super(AzureRMWebAppAccessRestriction, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + changed = False + site_config = self.get_webapp_config() + self.results.update(self.set_results(site_config)) + + if self.state == 'absent' and self.has_access_restrictions(site_config): + changed = True + if not self.check_mode: + self.log('Removing all access restrictions for 
webapp {0}'.format(self.name)) + site_config.ip_security_restrictions = [] + site_config.scm_ip_security_restrictions = [] + site_config.scm_ip_security_restrictions_use_main = False + self.update_webapp_config(site_config) + self.results['ip_security_restrictions'] = [] + self.results['scm_ip_security_restrictions'] = [] + self.results['scm_ip_security_restrictions_use_main'] = False + elif self.state == 'present': + if not self.has_access_restrictions(site_config) and (self.ip_security_restrictions or self.scm_ip_security_restrictions): + self.log('Adding new access restrictions for webapp {0}'.format(self.name)) + changed = True + elif self.has_updates(site_config): + self.log('Detected change in existing access restrictions for webapp {0}'.format(self.name)) + changed = True + + if changed: + site_config = self.get_updated_config(site_config) + if not self.check_mode: + self.log('Updating site config for webapp {0}'.format(self.name)) + site_config = self.update_webapp_config(site_config) + + self.results.update(self.set_results(site_config)) + + self.results['changed'] = changed + + return self.results + + def get_updated_config(self, site_config): + site_config.ip_security_restrictions = [] if not self.ip_security_restrictions else self.to_restriction_obj_list(self.ip_security_restrictions) + site_config.scm_ip_security_restrictions = [] if not self.scm_ip_security_restrictions else ( + self.to_restriction_obj_list(self.scm_ip_security_restrictions)) + site_config.scm_ip_security_restrictions_use_main = self.scm_ip_security_restrictions_use_main + return site_config + + def has_updates(self, site_config): + return (site_config.scm_ip_security_restrictions_use_main != self.scm_ip_security_restrictions_use_main or self.ip_security_restrictions and + self.ip_security_restrictions != self.to_restriction_dict_list(site_config.ip_security_restrictions) or self.scm_ip_security_restrictions and + self.scm_ip_security_restrictions != 
self.to_restriction_dict_list(site_config.scm_ip_security_restrictions)) + + def has_access_restrictions(self, site_config): + return site_config.ip_security_restrictions or site_config.scm_ip_security_restrictions + + def get_webapp_config(self): + try: + return self.web_client.web_apps.get_configuration(resource_group_name=self.resource_group, name=self.name) + except Exception as exc: + self.fail("Error getting webapp config {0} (rg={1}) - {2}".format(self.name, self.resource_group, str(exc))) + + def update_webapp_config(self, param): + try: + return self.web_client.web_apps.create_or_update_configuration(resource_group_name=self.resource_group, name=self.name, site_config=param) + except Exception as exc: + self.fail("Error creating/updating webapp config {0} (rg={1}) - {2}".format(self.name, self.resource_group, str(exc))) + + def set_results(self, site_config): + output = dict() + if site_config.ip_security_restrictions: + output['ip_security_restrictions'] = self.to_restriction_dict_list(site_config.ip_security_restrictions) + if site_config.scm_ip_security_restrictions: + output['scm_ip_security_restrictions'] = self.to_restriction_dict_list(site_config.scm_ip_security_restrictions) + output['scm_ip_security_restrictions_use_main'] = site_config.scm_ip_security_restrictions_use_main + return output + + def to_restriction_obj_list(self, restriction_dict_list): + return [] if not restriction_dict_list else [self.to_restriction_obj(restriction) for restriction in restriction_dict_list] + + def to_restriction_obj(self, restriction_dict): + return IpSecurityRestriction( + name=restriction_dict['name'], + description=restriction_dict['description'], + action=restriction_dict['action'], + priority=restriction_dict['priority'], + ip_address=restriction_dict['ip_address'], + ) + + def to_restriction_dict_list(self, restriction_obj_list): + restrictions = [] + if restriction_obj_list: + for r in restriction_obj_list: + restriction = self.to_restriction_dict(r) + if 
not self.is_azure_default_restriction(restriction): + restrictions.append(restriction) + + return restrictions + + def is_azure_default_restriction(self, restriction_obj): + return (restriction_obj["action"] == "Allow" and restriction_obj["ip_address"] == "Any" and restriction_obj["priority"] == 1) or \ + (restriction_obj["action"] == "Deny" and restriction_obj["ip_address"] == "Any" and restriction_obj["priority"] == 2147483647) + + def to_restriction_dict(self, restriction_obj): + return dict( + name=restriction_obj.name, + description=restriction_obj.description, + action=restriction_obj.action, + priority=restriction_obj.priority, + ip_address=restriction_obj.ip_address, + ) + + +def main(): + AzureRMWebAppAccessRestriction() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py new file mode 100644 index 000000000..0c3eb3260 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappaccessrestriction_info +version_added: "1.8.0" +short_description: Retrieve web app network access restriction facts +description: + - Get facts for a web app's network access restrictions. +options: + name: + description: + - Name of the web app. + required: true + type: str + resource_group: + description: + - Resource group of the web app. 
+ required: true + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' + - name: View web app access restrictions. + azure.azcollection.azure_rm_webappaccessrestriction_info: + name: "MyWebapp" + resource_group: "MyResourceGroup" +''' + +RETURN = ''' +ip_security_restrictions: + description: + - The web app's HTTP access restrictions. + returned: always + type: list + elements: dict + contains: + name: + description: + - Name of the access restriction. + returned: always + type: str + sample: my-access-restriction + description: + description: + - Description of the access restriction. + returned: always + type: str + sample: my-access-restriction-description + action: + description: + - Traffic action of the access restriction. + returned: always + type: str + sample: Allow + priority: + description: + - Numerical priority of the access restriction. + returned: always + type: int + sample: 1 + ip_address: + description: + - IP address of the access restriction. + returned: always + type: str + sample: 1.1.1.1/32 +scm_ip_security_restrictions: + description: + - The web app's SCM access restrictions. + returned: always + type: list + elements: dict + contains: + name: + description: + - Name of the access restriction. + returned: always + type: str + sample: my-access-restriction + description: + description: + - Description of the access restriction. + returned: always + type: str + sample: my-access-restriction-description + action: + description: + - Traffic action of the access restriction. + returned: always + type: str + sample: Allow + priority: + description: + - Numerical priority of the access restriction. + returned: always + type: int + sample: 1 + ip_address: + description: + - IP address of the access restriction. 
+ returned: always + type: str + sample: 1.1.1.1/32 +scm_ip_security_restrictions_use_main: + description: + - Whether the HTTP access restrictions are used for SCM access. + returned: always + type: bool + sample: false +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMWebAppAccessRestrictionInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + ) + + self.results = dict( + changed=False, + ip_security_restrictions=[], + scm_ip_security_restrictions=[], + scm_ip_security_restrictions_use_main=False, + ) + + self.name = None + self.resource_group = None + + super(AzureRMWebAppAccessRestrictionInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + site_config = self.get_webapp_config() + self.results.update(self.set_results(site_config)) + + return self.results + + def get_webapp_config(self): + try: + return self.web_client.web_apps.get_configuration(resource_group_name=self.resource_group, name=self.name) + except Exception as exc: + self.fail("Error getting webapp config {0} (rg={1}) - {2}".format(self.name, self.resource_group, str(exc))) + + def set_results(self, site_config): + output = dict() + if site_config.ip_security_restrictions: + output['ip_security_restrictions'] = self.to_restriction_dict_list(site_config.ip_security_restrictions) + if site_config.scm_ip_security_restrictions: + output['scm_ip_security_restrictions'] = self.to_restriction_dict_list(site_config.scm_ip_security_restrictions) + output['scm_ip_security_restrictions_use_main'] = site_config.scm_ip_security_restrictions_use_main + return output + + def to_restriction_dict_list(self, restriction_obj_list): + restrictions = [] 
+ if restriction_obj_list: + for r in restriction_obj_list: + restriction = self.to_restriction_dict(r) + if not self.is_azure_default_restriction(restriction): + restrictions.append(restriction) + + return restrictions + + def is_azure_default_restriction(self, restriction_obj): + return (restriction_obj["action"] == "Allow" and restriction_obj["ip_address"] == "Any" and restriction_obj["priority"] == 1) or \ + (restriction_obj["action"] == "Deny" and restriction_obj["ip_address"] == "Any" and restriction_obj["priority"] == 2147483647) + + def to_restriction_dict(self, restriction_obj): + return dict( + name=restriction_obj.name, + description=restriction_obj.description, + action=restriction_obj.action, + priority=restriction_obj.priority, + ip_address=restriction_obj.ip_address, + ) + + +def main(): + AzureRMWebAppAccessRestrictionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappslot.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappslot.py new file mode 100644 index 000000000..08ca490b0 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappslot.py @@ -0,0 +1,1063 @@ +#!/usr/bin/python +# +# Copyright (c) 2018 Yunge Zhu, +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappslot +version_added: "0.1.2" +short_description: Manage Azure Web App slot +description: + - Create, update and delete Azure Web App slot. + +options: + resource_group: + description: + - Name of the resource group to which the resource belongs. + required: True + name: + description: + - Unique name of the deployment slot to create or update. + required: True + webapp_name: + description: + - Web app name which this deployment slot belongs to. 
+ required: True + location: + description: + - Resource location. If not set, location from the resource group will be used as default. + configuration_source: + description: + - Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot. + auto_swap_slot_name: + description: + - Used to configure target slot name to auto swap, or disable auto swap. + - Set it target slot name to auto swap. + - Set it to False to disable auto slot swap. + swap: + description: + - Swap deployment slots of a web app. + suboptions: + action: + description: + - Swap types. + - C(preview) is to apply target slot settings on source slot first. + - C(swap) is to complete swapping. + - C(reset) is to reset the swap. + choices: + - preview + - swap + - reset + default: preview + target_slot: + description: + - Name of target slot to swap. If set to None, then swap with production slot. + preserve_vnet: + description: + - C(True) to preserve virtual network to the slot during swap. Otherwise C(False). + type: bool + default: True + frameworks: + description: + - Set of run time framework settings. Each setting is a dictionary. + - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info. + suboptions: + name: + description: + - Name of the framework. + - Supported framework list for Windows web app and Linux web app is different. + - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018. + - Windows web apps support multiple framework at same time. + - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018. + - Linux web apps support only one framework. + - Java framework is mutually exclusive with others. + choices: + - java + - net_framework + - php + - python + - ruby + - dotnetcore + - node + version: + description: + - Version of the framework. 
For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info. + - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5. + - C(php) supported value sample, C(5.5), C(5.6), C(7.0). + - C(python) supported value sample, C(5.5), C(5.6), C(7.0). + - C(node) supported value sample, C(6.6), C(6.9). + - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2). + - C(ruby) supported value sample, 2.3. + - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app. + settings: + description: + - List of settings of the framework. + suboptions: + java_container: + description: + - Name of Java container. This is supported by specific framework C(java) onlys, for example C(Tomcat), C(Jetty). + java_container_version: + description: + - Version of Java container. This is supported by specific framework C(java) only. + - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3). + container_settings: + description: + - Web app slot container settings. + suboptions: + name: + description: + - Name of container, for example C(imagename:tag). + registry_server_url: + description: + - Container registry server URL, for example C(mydockerregistry.io). + registry_server_user: + description: + - The container registry server user name. + registry_server_password: + description: + - The container registry server password. + startup_file: + description: + - The slot startup file. + - This only applies for Linux web app slot. + app_settings: + description: + - Configure web app slot application settings. Suboptions are in key value pair format. + purge_app_settings: + description: + - Purge any existing application settings. Replace slot application settings with app_settings. + type: bool + default: False + deployment_source: + description: + - Deployment source for git. + suboptions: + url: + description: + - Repository URL of deployment source. 
+ branch: + description: + - The branch name of the repository. + app_state: + description: + - Start/Stop/Restart the slot. + type: str + choices: + - started + - stopped + - restarted + default: started + state: + description: + - State of the Web App deployment slot. + - Use C(present) to create or update a slot and C(absent) to delete it. + default: present + choices: + - absent + - present + +extends_documentation_fragment: + - azure.azcollection.azure + - azure.azcollection.azure_tags + +author: + - Yunge Zhu(@yungezz) + +''' + +EXAMPLES = ''' + - name: Create a webapp slot + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + configuration_source: myJavaWebApp + app_settings: + testkey: testvalue + + - name: swap the slot with production slot + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + swap: + action: swap + + - name: stop the slot + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + app_state: stopped + + - name: udpate a webapp slot app settings + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + app_settings: + testkey: testvalue2 + + - name: udpate a webapp slot frameworks + azure_rm_webappslot: + resource_group: myResourceGroup + webapp_name: myJavaWebApp + name: stage + frameworks: + - name: "node" + version: "10.1" +''' + +RETURN = ''' +id: + description: + - ID of current slot. 
+ returned: always + type: str + sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1 +''' + +import time +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.core.exceptions import ResourceNotFoundError + from azure.core.polling import LROPoller + from azure.mgmt.web.models import Site, NameValuePair, SiteSourceControl, CsmSlotEntity, StringDictionary +except ImportError: + # This is handled in azure_rm_common + pass + +swap_spec = dict( + action=dict( + type='str', + choices=[ + 'preview', + 'swap', + 'reset' + ], + default='preview' + ), + target_slot=dict( + type='str' + ), + preserve_vnet=dict( + type='bool', + default=True + ) +) + +container_settings_spec = dict( + name=dict(type='str', required=True), + registry_server_url=dict(type='str'), + registry_server_user=dict(type='str'), + registry_server_password=dict(type='str', no_log=True) +) + +deployment_source_spec = dict( + url=dict(type='str'), + branch=dict(type='str') +) + + +framework_settings_spec = dict( + java_container=dict(type='str', required=True), + java_container_version=dict(type='str', required=True) +) + + +framework_spec = dict( + name=dict( + type='str', + required=True, + choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']), + version=dict(type='str', required=True), + settings=dict(type='dict', options=framework_settings_spec) +) + + +def webapp_to_dict(webapp): + return dict( + id=webapp.id, + name=webapp.name, + location=webapp.location, + client_cert_enabled=webapp.client_cert_enabled, + enabled=webapp.enabled, + reserved=webapp.reserved, + client_affinity_enabled=webapp.client_affinity_enabled, + server_farm_id=webapp.server_farm_id, + host_names_disabled=webapp.host_names_disabled, + https_only=webapp.https_only if hasattr(webapp, 'https_only') else None, + 
skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None, + ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None, + state=webapp.state, + tags=webapp.tags if webapp.tags else None + ) + + +def slot_to_dict(slot): + return dict( + id=slot.id, + resource_group=slot.resource_group, + server_farm_id=slot.server_farm_id, + target_swap_slot=slot.target_swap_slot, + enabled_host_names=slot.enabled_host_names, + slot_swap_status=slot.slot_swap_status, + name=slot.name, + location=slot.location, + enabled=slot.enabled, + reserved=slot.reserved, + host_names_disabled=slot.host_names_disabled, + state=slot.state, + repository_site_name=slot.repository_site_name, + default_host_name=slot.default_host_name, + kind=slot.kind, + site_config=slot.site_config, + tags=slot.tags if slot.tags else None + ) + + +class Actions: + NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4) + + +class AzureRMWebAppSlots(AzureRMModuleBase): + """Configuration class for an Azure RM Web App slot resource""" + + def __init__(self): + self.module_arg_spec = dict( + resource_group=dict( + type='str', + required=True + ), + name=dict( + type='str', + required=True + ), + webapp_name=dict( + type='str', + required=True + ), + location=dict( + type='str' + ), + configuration_source=dict( + type='str' + ), + auto_swap_slot_name=dict( + type='raw' + ), + swap=dict( + type='dict', + options=swap_spec + ), + frameworks=dict( + type='list', + elements='dict', + options=framework_spec + ), + container_settings=dict( + type='dict', + options=container_settings_spec + ), + deployment_source=dict( + type='dict', + options=deployment_source_spec + ), + startup_file=dict( + type='str' + ), + app_settings=dict( + type='dict' + ), + purge_app_settings=dict( + type='bool', + default=False + ), + app_state=dict( + type='str', + choices=['started', 'stopped', 'restarted'], + default='started' + ), + 
state=dict( + type='str', + default='present', + choices=['present', 'absent'] + ) + ) + + mutually_exclusive = [['container_settings', 'frameworks']] + + self.resource_group = None + self.name = None + self.webapp_name = None + self.location = None + + self.auto_swap_slot_name = None + self.swap = None + self.tags = None + self.startup_file = None + self.configuration_source = None + self.clone = False + + # site config, e.g app settings, ssl + self.site_config = dict() + self.app_settings = dict() + self.app_settings_strDic = None + + # siteSourceControl + self.deployment_source = dict() + + # site, used at level creation, or update. + self.site = None + + # property for internal usage, not used for sdk + self.container_settings = None + + self.purge_app_settings = False + self.app_state = 'started' + + self.results = dict( + changed=False, + id=None, + ) + self.state = None + self.to_do = Actions.NoAction + + self.frameworks = None + + # set site_config value from kwargs + self.site_config_updatable_frameworks = ["net_framework_version", + "java_version", + "php_version", + "python_version", + "linux_fx_version"] + + self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java'] + self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java'] + + super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + supports_tags=True) + + def exec_module(self, **kwargs): + """Main module execution method""" + + for key in list(self.module_arg_spec.keys()) + ['tags']: + if hasattr(self, key): + setattr(self, key, kwargs[key]) + elif kwargs[key] is not None: + if key == "scm_type": + self.site_config[key] = kwargs[key] + + old_response = None + response = None + to_be_updated = False + + # set location + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + self.location = resource_group.location + + # get web 
app + webapp_response = self.get_webapp() + + if not webapp_response: + self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group)) + + # get slot + old_response = self.get_slot() + + # set is_linux + is_linux = True if webapp_response['reserved'] else False + + if self.state == 'present': + if self.frameworks: + # java is mutually exclusive with other frameworks + if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks): + self.fail('Java is mutually exclusive with other frameworks.') + + if is_linux: + if len(self.frameworks) != 1: + self.fail('Can specify one framework only for Linux web app.') + + if self.frameworks[0]['name'] not in self.supported_linux_frameworks: + self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name'])) + + self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper() + + if self.frameworks[0]['name'] == 'java': + if self.frameworks[0]['version'] != '8': + self.fail("Linux web app only supports java 8.") + + if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \ + self.frameworks[0]['settings']['java_container'].lower() != 'tomcat': + self.fail("Linux web app only supports tomcat container.") + + if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \ + self.frameworks[0]['settings']['java_container'].lower() == 'tomcat': + self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8' + else: + self.site_config['linux_fx_version'] = 'JAVA|8-jre8' + else: + for fx in self.frameworks: + if fx.get('name') not in self.supported_windows_frameworks: + self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name'))) + else: + self.site_config[fx.get('name') + '_version'] = fx.get('version') + + if 
'settings' in fx and fx['settings'] is not None: + for key, value in fx['settings'].items(): + self.site_config[key] = value + + if not self.app_settings: + self.app_settings = dict() + + if self.container_settings: + linux_fx_version = 'DOCKER|' + + if self.container_settings.get('registry_server_url'): + self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url'] + + linux_fx_version += self.container_settings['registry_server_url'] + '/' + + linux_fx_version += self.container_settings['name'] + + self.site_config['linux_fx_version'] = linux_fx_version + + if self.container_settings.get('registry_server_user'): + self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user'] + + if self.container_settings.get('registry_server_password'): + self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password'] + + # set auto_swap_slot_name + if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str): + self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name + if self.auto_swap_slot_name is False: + self.site_config['auto_swap_slot_name'] = None + + # init site + self.site = Site(location=self.location, site_config=self.site_config) + + # check if the slot already present in the webapp + if not old_response: + self.log("Web App slot doesn't exist") + + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + self.site.tags = self.tags + + # if linux, setup startup_file + if self.startup_file: + self.site_config['app_command_line'] = self.startup_file + + # set app setting + if self.app_settings: + app_settings = [] + for key in self.app_settings.keys(): + app_settings.append(NameValuePair(name=key, value=self.app_settings[key])) + + self.site_config['app_settings'] = app_settings + + # clone slot + if self.configuration_source: + self.clone = True + + else: + # existing slot, do update + self.log("Web App slot 
already exists") + + self.log('Result: {0}'.format(old_response)) + + update_tags, self.site.tags = self.update_tags(old_response.get('tags', None)) + + if update_tags: + to_be_updated = True + + # check if site_config changed + old_config = self.get_configuration_slot(self.name) + + if self.is_site_config_changed(old_config): + to_be_updated = True + self.to_do = Actions.CreateOrUpdate + + self.app_settings_strDic = self.list_app_settings_slot(self.name) + + # purge existing app_settings: + if self.purge_app_settings: + to_be_updated = True + self.to_do = Actions.UpdateAppSettings + self.app_settings_strDic = dict() + + # check if app settings changed + if self.purge_app_settings or self.is_app_settings_changed(): + to_be_updated = True + self.to_do = Actions.UpdateAppSettings + + if self.app_settings: + for key in self.app_settings.keys(): + self.app_settings_strDic[key] = self.app_settings[key] + + elif self.state == 'absent': + if old_response: + self.log("Delete Web App slot") + self.results['changed'] = True + + if self.check_mode: + return self.results + + self.delete_slot() + + self.log('Web App slot deleted') + + else: + self.log("Web app slot {0} not exists.".format(self.name)) + + if to_be_updated: + self.log('Need to Create/Update web app') + self.results['changed'] = True + + if self.check_mode: + return self.results + + if self.to_do == Actions.CreateOrUpdate: + response = self.create_update_slot() + + self.results['id'] = response['id'] + + if self.clone: + self.clone_slot() + + if self.to_do == Actions.UpdateAppSettings: + self.update_app_settings_slot() + + slot = None + if response: + slot = response + if old_response: + slot = old_response + + if slot: + if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \ + (slot['state'] != 'Running' and self.app_state == 'started') or \ + self.app_state == 'restarted': + + self.results['changed'] = True + if self.check_mode: + return self.results + + self.set_state_slot(self.app_state) + + if 
self.swap: + self.results['changed'] = True + if self.check_mode: + return self.results + + self.swap_slot() + + return self.results + + # compare site config + def is_site_config_changed(self, existing_config): + for fx_version in self.site_config_updatable_frameworks: + if self.site_config.get(fx_version): + if not getattr(existing_config, fx_version) or \ + getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper(): + return True + + if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None: + return True + elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None): + return True + return False + + # comparing existing app setting with input, determine whether it's changed + def is_app_settings_changed(self): + if self.app_settings: + if len(self.app_settings_strDic) != len(self.app_settings): + return True + + if self.app_settings_strDic != self.app_settings: + return True + return False + + # comparing deployment source with input, determine whether it's changed + def is_deployment_source_changed(self, existing_webapp): + if self.deployment_source: + if self.deployment_source.get('url') \ + and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']: + return True + + if self.deployment_source.get('branch') \ + and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']: + return True + + return False + + def create_update_slot(self): + ''' + Creates or updates Web App slot with the specified configuration. 
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Creating / Updating the Web App slot {0}".format(self.name)) + + try: + response = self.web_client.web_apps.begin_create_or_update_slot(resource_group_name=self.resource_group, + slot=self.name, + name=self.webapp_name, + site_envelope=self.site) + if isinstance(response, LROPoller): + response = self.get_poller_result(response) + + except Exception as exc: + self.log('Error attempting to create the Web App slot instance.') + self.fail("Error creating the Web App slot: {0}".format(str(exc))) + return slot_to_dict(response) + + def delete_slot(self): + ''' + Deletes specified Web App slot in the specified subscription and resource group. + + :return: True + ''' + self.log("Deleting the Web App slot {0}".format(self.name)) + try: + self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + except Exception as e: + self.log('Error attempting to delete the Web App slot.') + self.fail( + "Error deleting the Web App slots: {0}".format(str(e))) + + return True + + def get_webapp(self): + ''' + Gets the properties of the specified Web App. 
+ + :return: deserialized Web App instance state dictionary + ''' + self.log( + "Checking if the Web App instance {0} is present".format(self.webapp_name)) + + response = None + + try: + response = self.web_client.web_apps.get(resource_group_name=self.resource_group, + name=self.webapp_name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising error + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App instance : {0} found".format(response.name)) + return webapp_to_dict(response) + + except ResourceNotFoundError: + pass + + self.log("Didn't find web app {0} in resource group {1}".format( + self.webapp_name, self.resource_group)) + + return False + + def get_slot(self): + ''' + Gets the properties of the specified Web App slot. + + :return: deserialized Web App slot state dictionary + ''' + self.log( + "Checking if the Web App slot {0} is present".format(self.name)) + + response = None + + try: + response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + + # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising error + if response is not None: + self.log("Response : {0}".format(response)) + self.log("Web App slot: {0} found".format(response.name)) + return slot_to_dict(response) + + except ResourceNotFoundError: + pass + + self.log("Does not find web app slot {0} in resource group {1}".format(self.name, self.resource_group)) + + return False + + def list_app_settings(self): + ''' + List webapp application settings + :return: deserialized list response + ''' + self.log("List webapp application setting") + + try: + response = self.web_client.web_apps.list_application_settings( + resource_group_name=self.resource_group, name=self.webapp_name) + self.log("Response : {0}".format(response)) + + return response.properties + except Exception as ex: + self.fail("Failed to list application 
settings for web app {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def list_app_settings_slot(self, slot_name): + ''' + List application settings + :return: deserialized list response + ''' + self.log("List application setting") + + try: + response = self.web_client.web_apps.list_application_settings_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name) + self.log("Response : {0}".format(response)) + + return response.properties + except Exception as ex: + self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def update_app_settings_slot(self, slot_name=None, app_settings=None): + ''' + Update application settings + :return: deserialized updating response + ''' + self.log("Update application setting") + + if slot_name is None: + slot_name = self.name + if app_settings is None: + app_settings = self.app_settings_strDic + try: + settings = StringDictionary( + properties=self.app_settings + ) + response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=slot_name, + app_settings=settings) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except Exception as ex: + self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + return response + + def create_or_update_source_control_slot(self): + ''' + Update site source control + :return: deserialized updating response + ''' + self.log("Update site source control") + + if self.deployment_source is None: + return False + + self.deployment_source['is_manual_integration'] = False + self.deployment_source['is_mercurial'] = False + + try: + site_source_control = SiteSourceControl( + repo_url=self.deployment_source.get('url'), + 
branch=self.deployment_source.get('branch') + ) + response = self.web_client.web_client.begin_create_or_update_source_control_slot( + resource_group_name=self.resource_group, + name=self.webapp_name, + site_source_control=site_source_control, + slot=self.name) + self.log("Response : {0}".format(response)) + + return response.as_dict() + except Exception as ex: + self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format( + self.name, self.resource_group, str(ex))) + + def get_configuration(self): + ''' + Get web app configuration + :return: deserialized web app configuration response + ''' + self.log("Get web app configuration") + + try: + response = self.web_client.web_apps.get_configuration( + resource_group_name=self.resource_group, name=self.webapp_name) + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format( + self.webapp_name, self.resource_group, str(ex))) + + def get_configuration_slot(self, slot_name): + ''' + Get slot configuration + :return: deserialized slot configuration response + ''' + self.log("Get web app slot configuration") + + try: + response = self.web_client.web_apps.get_configuration_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name) + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format( + slot_name, self.resource_group, str(ex))) + + def update_configuration_slot(self, slot_name=None, site_config=None): + ''' + Update slot configuration + :return: deserialized slot configuration response + ''' + self.log("Update web app slot configuration") + + if slot_name is None: + slot_name = self.name + if site_config is None: + site_config = self.site_config + try: + response = 
self.web_client.web_apps.update_configuration_slot( + resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config) + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format( + slot_name, self.resource_group, str(ex))) + + def set_state_slot(self, appstate): + ''' + Start/stop/restart web app slot + :return: deserialized updating response + ''' + try: + if appstate == 'started': + response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + elif appstate == 'stopped': + response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + elif appstate == 'restarted': + response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name) + else: + self.fail("Invalid web app slot state {0}".format(appstate)) + + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + request_id = ex.request_id if ex.request_id else '' + self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format( + appstate, self.name, self.resource_group, request_id, str(ex))) + + def swap_slot(self): + ''' + Swap slot + :return: deserialized response + ''' + self.log("Swap slot") + + try: + if self.swap['action'] == 'swap': + if self.swap['target_slot'] is None: + slot_swap_entity = CsmSlotEntity( + target_slot=self.name, + preserve_vnet=self.swap['preserve_vnet'] + ) + response = self.web_client.web_apps.begin_swap_slot_with_production(resource_group_name=self.resource_group, + name=self.webapp_name, + slot_swap_entity=slot_swap_entity) + else: + slot_swap_entity = CsmSlotEntity( + target_slot=self.swap['target_slot'], + preserve_vnet=self.swap['preserve_vnet'] + ) + 
response = self.web_client.web_apps.begin_swap_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name, + slot_swap_entity=slot_swap_entity) + elif self.swap['action'] == 'preview': + if self.swap['target_slot'] is None: + slot_swap_entity = CsmSlotEntity( + target_slot=self.name, + preserve_vnet=self.swap['preserve_vnet'] + ) + response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group, + name=self.webapp_name, + slot_swap_entity=slot_swap_entity) + else: + slot_swap_entity = CsmSlotEntity( + target_slot=self.swap['target_slot'], + preserve_vnet=self.swap['preserve_vnet'] + ) + response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name, + slot_swap_entity=slot_swap_entity) + elif self.swap['action'] == 'reset': + if self.swap['target_slot'] is None: + response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group, + name=self.webapp_name) + else: + response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.swap['target_slot']) + response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group, + name=self.webapp_name, + slot=self.name) + + self.log("Response : {0}".format(response)) + + return response + except Exception as ex: + self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex))) + + def clone_slot(self): + if self.configuration_source: + src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source + + if src_slot is None: + site_config_clone_from = self.get_configuration() + else: + site_config_clone_from = self.get_configuration_slot(slot_name=src_slot) + + 
self.update_configuration_slot(site_config=site_config_clone_from) + + if src_slot is None: + app_setting_clone_from = self.list_app_settings() + else: + app_setting_clone_from = self.list_app_settings_slot(src_slot) + + if self.app_settings: + app_setting_clone_from.update(self.app_settings) + + self.update_app_settings_slot(app_settings=app_setting_clone_from) + + +def main(): + """Main execution""" + AzureRMWebAppSlots() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection.py new file mode 100644 index 000000000..4b55a93f4 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappvnetconnection +version_added: "1.8.0" +short_description: Manage web app virtual network connection +description: + - Add, remove, or update the virtual network connection for a web app. +options: + name: + description: + - Name of the web app. + required: true + type: str + resource_group: + description: + - Resource group of the web app. + required: true + type: str + state: + description: + - State of the virtual network connection. Use C(present) to create or update and C(absent) to delete. + type: str + default: present + choices: + - absent + - present + vnet_name: + description: + - Name of the virtual network. Required if adding or updating. + type: str + subnet: + description: + - Name of the virtual network's subnet. Required if adding or updating. 
+ type: str + vnet_resource_group: + description: + - Name of the resource group for the virtual network. Defaults to main C(resource_group) value. + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' + - name: Configure web app with virtual network + azure.azcollection.azure_rm_webappvnetconnection: + name: "MyWebapp" + resource_group: "MyResourceGroup" + vnet_name: "MyVnetName" + subnet: "MySubnetName" + + - name: Configure web app with virtual network in different resource group + azure.azcollection.azure_rm_webappvnetconnection: + name: "MyWebapp" + resource_group: "MyResourceGroup" + vnet_name: "MyVnetName" + subnet: "MySubnetName" + vnet_resource_group: "MyOtherResourceGroup" + + - name: Delete web app virtual network + azure.azcollection.azure_rm_webappvnetconnection: + name: "MyWebapp" + resource_group: "MyResourceGroup" + state: "absent" +''' + +RETURN = ''' +connection: + description: + - The web app's virtual network connection. + returned: always + type: complex + contains: + id: + description: + - ID of the web app virtual network connection. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp/virtualNetworkConnections/yyy-yyy_subnet + name: + description: + - Name of the web app virtual network connection. + returned: always + type: str + sample: yyy-yyy_subnet + subnet_name: + description: + - Name of the subnet connected to the web app. + returned: always + type: str + sample: mySubnet + vnet_name: + description: + - Name of the virtual network connected to the web app. + returned: always + type: str + sample: myVnet + vnet_resource_group: + description: + - Name of the resource group the virtual network is in. + returned: always + type: str + sample: myResourceGroup + vnet_resource_id: + description: + - ID of the virtual network/subnet connected to the web app. 
+ returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + +try: + from azure.mgmt.web.models import SwiftVirtualNetwork +except Exception: + # This is handled in azure_rm_common + pass + + +class AzureRMWebAppVnetConnection(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + vnet_name=dict(type='str'), + subnet=dict(type='str'), + vnet_resource_group=dict(type='str'), + ) + + self.results = dict( + changed=False, + connection=dict(), + ) + + self.state = None + self.name = None + self.resource_group = None + self.vnet_name = None + self.subnet = None + self.vnet_resource_group = None + + super(AzureRMWebAppVnetConnection, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + changed = False + vnet = self.get_vnet_connection() + if vnet: + self.results['connection'] = self.set_results(vnet) + + if self.state == 'absent' and vnet: + changed = True + if not self.check_mode: + self.log('Deleting vnet connection for webapp {0}'.format(self.name)) + self.delete_vnet_connection() + self.results['connection'] = dict() + elif self.state == 'present': + self.vnet_resource_group = self.vnet_resource_group or self.resource_group + + if not vnet: + self.log('Adding vnet connection for webapp {0}'.format(self.name)) + changed = True + else: + subnet_detail = self.get_subnet_detail(vnet.vnet_resource_id) + if (subnet_detail['resource_group'] != self.vnet_resource_group + or subnet_detail['vnet_name'] != self.vnet_name + or 
subnet_detail['subnet_name'] != self.subnet): + self.log('Detected change in vnet connection for webapp {0}'.format(self.name)) + changed = True + + if changed: + if not self.check_mode: + self.log('Updating vnet connection for webapp {0}'.format(self.name)) + subnet = self.get_subnet() + param = SwiftVirtualNetwork(subnet_resource_id=subnet.id) + self.create_or_update_vnet_connection(param) + vnet = self.get_vnet_connection() + self.results['connection'] = self.set_results(vnet) + + self.results['changed'] = changed + return self.results + + def get_vnet_connection(self): + connections = self.list_vnet_connections() + for connection in connections: + if connection.is_swift: + return connection + + return None + + def list_vnet_connections(self): + try: + return self.web_client.web_apps.list_vnet_connections(resource_group_name=self.resource_group, name=self.name) + except Exception as exc: + self.fail("Error getting webapp vnet connections {0} (rg={1}) - {2}".format(self.name, self.resource_group, str(exc))) + + def delete_vnet_connection(self): + try: + return self.web_client.web_apps.delete_swift_virtual_network(resource_group_name=self.resource_group, name=self.name) + except Exception as exc: + self.fail("Error deleting webapp vnet connection {0} (rg={1}) - {2}".format(self.name, self.resource_group, str(exc))) + + def create_or_update_vnet_connection(self, vnet): + try: + return self.web_client.web_apps.create_or_update_swift_virtual_network_connection_with_check( + resource_group_name=self.resource_group, name=self.name, connection_envelope=vnet) + except Exception as exc: + self.fail("Error creating/updating webapp vnet connection {0} (vnet={1}, rg={2}) - {3}".format( + self.name, self.vnet_name, self.resource_group, str(exc))) + + def get_subnet(self): + try: + return self.network_client.subnets.get(resource_group_name=self.vnet_resource_group, virtual_network_name=self.vnet_name, subnet_name=self.subnet) + except Exception as exc: + self.fail("Error 
getting subnet {0} in vnet={1} (rg={2}) - {3}".format(self.subnet, self.vnet_name, self.vnet_resource_group, str(exc))) + + def set_results(self, vnet): + vnet_dict = vnet.as_dict() + + output = dict() + output['id'] = vnet_dict['id'] + output['name'] = vnet_dict['name'] + subnet_id = vnet_dict.get('subnet_resource_id', vnet_dict.get('vnet_resource_id')) + output['vnet_resource_id'] = subnet_id + subnet_detail = self.get_subnet_detail(subnet_id) + output['vnet_resource_group'] = subnet_detail['resource_group'] + output['vnet_name'] = subnet_detail['vnet_name'] + output['subnet_name'] = subnet_detail['subnet_name'] + + return output + + +def main(): + AzureRMWebAppVnetConnection() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection_info.py new file mode 100644 index 000000000..8ad772918 --- /dev/null +++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappvnetconnection_info.py @@ -0,0 +1,164 @@ +#!/usr/bin/python +# +# Copyright (c) 2021 Ross Bender (@l3ender) +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: azure_rm_webappvnetconnection_info + +version_added: "1.8.0" + +short_description: Get Azure web app virtual network connection facts + +description: + - Get facts for a web app's virtual network connection. + +options: + name: + description: + - Name of the web app. + required: true + type: str + resource_group: + description: + - Resource group of the web app. 
+ required: true + type: str + +extends_documentation_fragment: + - azure.azcollection.azure + +author: + - Ross Bender (@l3ender) +''' + +EXAMPLES = ''' + - name: Get web app virtual network connection + azure_rm_webappvnetconnection_info: + name: "MyWebapp" + resource_group: "MyResourceGroup" +''' + +RETURN = ''' +connection: + description: + - The web app's virtual network connection. + returned: always + type: complex + contains: + id: + description: + - ID of the web app virtual network connection. + returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp/virtualNetworkConnections/yyy-yyy_subnet + name: + description: + - Name of the web app virtual network connection. + returned: always + type: str + sample: yyy-yyy_subnet + subnet_name: + description: + - Name of the subnet connected to the web app. + returned: always + type: str + sample: mySubnet + vnet_name: + description: + - Name of the virtual network connected to the web app. + returned: always + type: str + sample: myVnet + vnet_resource_group: + description: + - Name of the resource group the virtual network is in. + returned: always + type: str + sample: myResourceGroup + vnet_resource_id: + description: + - ID of the virtual network/subnet connected to the web app. 
+ returned: always + type: str + sample: /subscriptions/xxx-xxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/subnets/mySubnet +''' + +from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase + + +class AzureRMWebAppVnetConnectionInfo(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str', required=True), + resource_group=dict(type='str', required=True), + ) + + self.results = dict( + changed=False, + connection=dict(), + ) + + self.name = None + self.resource_group = None + + super(AzureRMWebAppVnetConnectionInfo, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + vnet = self.get_vnet_connection() + + if vnet: + self.results['connection'] = self.set_results(vnet) + + return self.results + + def get_vnet_connection(self): + connections = self.list_vnet_connections() + for connection in connections: + if connection.is_swift: + return connection + + return None + + def list_vnet_connections(self): + try: + return self.web_client.web_apps.list_vnet_connections(resource_group_name=self.resource_group, name=self.name) + except Exception as exc: + self.fail("Error getting webapp vnet connections {0} (rg={1}) - {2}".format(self.name, self.resource_group, str(exc))) + + def set_results(self, vnet): + vnet_dict = vnet.as_dict() + + output = dict() + output['id'] = vnet_dict['id'] + output['name'] = vnet_dict['name'] + subnet_id = vnet_dict['vnet_resource_id'] + output['vnet_resource_id'] = subnet_id + subnet_detail = self.get_subnet_detail(subnet_id) + output['vnet_resource_group'] = subnet_detail['resource_group'] + output['vnet_name'] = subnet_detail['vnet_name'] + output['subnet_name'] = subnet_detail['subnet_name'] + + return output + + +def main(): + 
AzureRMWebAppVnetConnectionInfo() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/azure/azcollection/pr-pipelines.yml b/ansible_collections/azure/azcollection/pr-pipelines.yml new file mode 100644 index 000000000..c8b943d62 --- /dev/null +++ b/ansible_collections/azure/azcollection/pr-pipelines.yml @@ -0,0 +1,264 @@ +parameters: +- name: PYTHON_VER + displayName: 'Python Version' + type: string + default: "3.8" + values: + - "2.7" + - "3.6" + - "3.8" + - "3.9" + - "3.10" + - "3.11" +- name: ANSIBLE_VER + displayName: 'Ansible Version' + type: string + default: "2.12" + values: + - "2.9" + - "2.10" + - "2.11" + - "2.12" + - "2.13" + - "2.14" + - "devel" +- name: MODULE_NAME + displayName: 'Test Module' + type: string + default: 'all' + values: + - 'all' + - 'sanity' + - 'azure_rm_adapplication' + - "azure_rm_acs" + - "azure_rm_adgroup" + - "azure_rm_aduser" + - "azure_rm_aks" + - "azure_rm_aksagentpool" + - "azure_rm_apimanagement" + - "azure_rm_appgateway" + - "azure_rm_appserviceplan" + - "azure_rm_automationaccount" + - "azure_rm_automationrunbook" + - "azure_rm_autoscale" + - "azure_rm_availabilityset" + - "azure_rm_azurefirewall" + - "azure_rm_apimanagementservice" + - "azure_rm_batchaccount" + - "azure_rm_backuppolicy" + - "azure_rm_cdnprofile" + - "azure_rm_containerinstance" + - "azure_rm_containerregistry" + - "azure_rm_containerregistrytag" + - "azure_rm_cosmosdbaccount" + - "azure_rm_datalakestore" + - "azure_rm_ddosprotectionplan" + - "azure_rm_deployment" + - "azure_rm_diskencryptionset" + - "azure_rm_dnsrecordset" + - "azure_rm_dnszone" + - "azure_rm_eventhub" + - "azure_rm_expressroute" + - "azure_rm_firewallpolicy" + - "azure_rm_functionapp" + - "azure_rm_gallery" + - "azure_rm_hdinsightcluster" + - "azure_rm_hostgroup" + - "azure_rm_image" + - "azure_rm_iothub" + - "azure_rm_ipgroup" + - "azure_rm_keyvault" + - "azure_rm_keyvaultkey" + - "azure_rm_keyvaultsecret" + - "azure_rm_loadbalancer" + - 
"azure_rm_loganalyticsworkspace" + - "azure_rm_manageddisk" + - "azure_rm_managementgroup" + - "azure_rm_mariadbserver" + - "azure_rm_monitordiagnosticsetting" + - "azure_rm_monitorlogprofile" + - "azure_rm_multiplemanageddisks" + - "azure_rm_mysqlserver" + - "azure_rm_natgateway" + - "azure_rm_networkinterface" + - "azure_rm_notificationhub" + - "azure_rm_openshiftmanagedcluster" + - "azure_rm_postgresqlserver" + - "azure_rm_privatednsrecordset" + - "azure_rm_privatednszone" + - "azure_rm_privateendpoint" + - "azure_rm_privateendpointdnszonegroup" + - "azure_rm_privatelinkservice" + - "azure_rm_privatednszonelink" + - "azure_rm_publicipaddress" + - "azure_rm_proximityplacementgroup" + - "azure_rm_rediscache" + - "azure_rm_resource" + - "azure_rm_resourcegroup" + - "azure_rm_routetable" + - "azure_rm_roleassignment" + - "azure_rm_roledefinition" + - "azure_rm_registrationassignment" + - "azure_rm_registrationdefinition" + - "azure_rm_cognitivesearch" + - "azure_rm_securitygroup" + - "azure_rm_servicebus" + - "azure_rm_sqlserver" + - "azure_rm_storageaccount" + - "azure_rm_storageblob" + - "azure_rm_storageshare" + - "azure_rm_subnet" + - "azure_rm_subscription" + - "azure_rm_trafficmanagerprofile" + - "azure_rm_virtualmachine" + - "azure_rm_virtualmachineextension" + - "azure_rm_virtualmachineimage_info" + - "azure_rm_virtualmachinescaleset" + - "azure_rm_virtualmachinesize_info" + - "azure_rm_virtualnetwork" + - "azure_rm_virtualnetworkgateway" + - "azure_rm_virtualnetworkpeering" + - "azure_rm_virtualwan" + - "azure_rm_vpnsite" + - "azure_rm_virtualhub" + - "azure_rm_virtualhubconnection" + - "azure_rm_backupazurevm" + - "azure_rm_recoveryservicesvault" + - "azure_rm_vmbackuppolicy" + - "azure_rm_webapp" + - "azure_rm_webappvnetconnection" + - "azure_rm_webappaccessrestriction" + - "azure_rm_workspace" + - "azure_rm_datafactory" + - "azure_rm_bastionhost" + - "azure_rm_devtestlab" + - "azure_rm_sqlmanagedinstance" + - "azure_rm_vmssnetworkinterface_info" + - 
"inventory_azure" + - "setup_azure" + +trigger: none + +pr: +- dev + +pool: + name: pool-ubuntu-2004 + +jobs: + - job: CreateResourceGroups + steps: + - bash: | + echo "##vso[task.setvariable variable=resource_group;isOutput=true]ansibletest-$(uuidgen)" + echo "##vso[task.setvariable variable=resource_group_secondary;isOutput=true]ansibletest2-$(uuidgen)" + echo "##vso[task.setvariable variable=resource_group_datalake;isOutput=true]ansibletest-$(uuidgen)" + + name: setvar + - bash: | + echo "Generate test resource group $(setvar.resource_group), $(setvar.resource_group_secondary), $(setvar.resource_group_datalake)" + - task: AzureCLI@2 + inputs: + azureSubscription: '$(SUBSCRIPTION_FULL_NAME)' + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + az group create -l eastus -n $(setvar.resource_group) + az group create -l eastus -n $(setvar.resource_group_secondary) + az group create -l eastus2 -n $(setvar.resource_group_datalake) + + - job: RunTests + dependsOn: CreateResourceGroups + timeoutInMinutes: 120 + variables: + TEST_RESOURCE_GROUP: $[ dependencies.CreateResourceGroups.outputs['setvar.resource_group'] ] + TEST_RESOURCE_GROUP_SECONDARY: $[ dependencies.CreateResourceGroups.outputs['setvar.resource_group_secondary'] ] + TEST_RESOURCE_GROUP_DATALAKE: $[ dependencies.CreateResourceGroups.outputs['setvar.resource_group_datalake'] ] + strategy: + matrix: + "Python${{ parameters.PYTHON_VER }}_sanity": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: 'sanity' + "Python${{ parameters.PYTHON_VER }}_1": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '1' + "Python${{ parameters.PYTHON_VER }}_2": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '2' + "Python${{ parameters.PYTHON_VER }}_3": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '3' + "Python${{ parameters.PYTHON_VER }}_4": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '4' + "Python${{ parameters.PYTHON_VER }}_5": + 
python.version: '${{ parameters.PYTHON_VER }}' + test.key: '5' + "Python${{ parameters.PYTHON_VER }}_6": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '6' + "Python${{ parameters.PYTHON_VER }}_7": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '7' + "Python${{ parameters.PYTHON_VER }}_9": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '9' + "Python${{ parameters.PYTHON_VER }}_10": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '10' + "Python${{ parameters.PYTHON_VER }}_11": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '11' + "Python${{ parameters.PYTHON_VER }}_12": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '12' + "Python${{ parameters.PYTHON_VER }}_13": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '13' + "Python${{ parameters.PYTHON_VER }}_14": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '14' + "Python${{ parameters.PYTHON_VER }}_15": + python.version: '${{ parameters.PYTHON_VER }}' + test.key: '15' + + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python $(python.version)' + inputs: + versionSpec: '$(python.version)' + + - script: tests/utils/ado/ado.sh $(test.key) ${{ parameters.PYTHON_VER }} ${{ parameters.ANSIBLE_VER }} ${{ parameters.MODULE_NAME }} + env: + SHIPPABLE_BUILD_DIR: $(Build.Repository.LocalPath) + AZURE_CLIENT_ID: $(AZURE_CLIENT_ID) + AZURE_SECRET: $(AZURE_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZURE_SUBSCRIPTION_ID) + AZURE_SUBSCRIPTION_SEC_ID: $(AZURE_SUBSCRIPTION_SEC_ID) + AZURE_TENANT: $(AZURE_TENANT) + AZURE_PRINCIPAL_ID: $(AZURE_PRINCIPAL_ID) + AZURE_MANAGED_BY_TENANT_ID: $(AZURE_MANAGED_BY_TENANT_ID) + AZURE_ROLE_DEFINITION_ID: $(AZURE_ROLE_DEFINITION_ID) + RESOURCE_GROUP: $(TEST_RESOURCE_GROUP) + RESOURCE_GROUP_SECONDARY: $(TEST_RESOURCE_GROUP_SECONDARY) + RESOURCE_GROUP_DATALAKE: $(TEST_RESOURCE_GROUP_DATALAKE) + displayName: 'Running Tests' + + - job: CleanupResourceGroups + dependsOn: + - CreateResourceGroups 
+ - RunTests + condition: always() + variables: + TEST_RESOURCE_GROUP: $[ dependencies.CreateResourceGroups.outputs['setvar.resource_group'] ] + TEST_RESOURCE_GROUP_SECONDARY: $[ dependencies.CreateResourceGroups.outputs['setvar.resource_group_secondary'] ] + TEST_RESOURCE_GROUP_DATALAKE: $[ dependencies.CreateResourceGroups.outputs['setvar.resource_group_datalake'] ] + steps: + - task: AzureCLI@2 + inputs: + azureSubscription: '$(SUBSCRIPTION_FULL_NAME)' + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + az group delete -n $(TEST_RESOURCE_GROUP) --yes --no-wait + az group delete -n $(TEST_RESOURCE_GROUP_SECONDARY) --yes --no-wait + az group delete -n $(TEST_RESOURCE_GROUP_DATALAKE) --yes --no-wait diff --git a/ansible_collections/azure/azcollection/release-pipelines.yml b/ansible_collections/azure/azcollection/release-pipelines.yml new file mode 100644 index 000000000..adb69fb4d --- /dev/null +++ b/ansible_collections/azure/azcollection/release-pipelines.yml @@ -0,0 +1,29 @@ +# Starter pipeline +# Start with a minimal pipeline that you can customize to build and deploy your code. +# Add steps that build, run tests, deploy, and more: +# https://aka.ms/yaml + +trigger: + tags: + include: + - v*.*.* + +pr: none + +pool: + name: pool-ubuntu-2004 + +steps: +- script: | + pip install wheel + pip install ansible==v2.9.0 + ansible --version + displayName: 'install ansible' + +- script: | + ansible-galaxy collection build . 
+ displayName: 'build collection' + +- script: | + ansible-galaxy collection publish *.tar.gz --api-key $(GALAXY_KEY) + displayName: 'publish collection' \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/requirements-azure.txt b/ansible_collections/azure/azcollection/requirements-azure.txt new file mode 100644 index 000000000..32603e2af --- /dev/null +++ b/ansible_collections/azure/azcollection/requirements-azure.txt @@ -0,0 +1,52 @@ +packaging +requests[security] +xmltodict +azure-cli-core==2.34.0 +azure-common==1.1.11 +azure-identity==1.7.0 +azure-mgmt-authorization==2.0.0 +azure-mgmt-apimanagement==3.0.0 +azure-mgmt-batch==5.0.1 +azure-mgmt-cdn==11.0.0 +azure-mgmt-compute==26.1.0 +azure-mgmt-containerinstance==9.0.0 +azure-mgmt-core==1.3.0 +azure-mgmt-containerregistry==9.1.0 +azure-containerregistry==1.1.0 +azure-mgmt-containerservice==20.0.0 +azure-mgmt-datalake-store==1.0.0 +azure-mgmt-datafactory==2.0.0 +azure-mgmt-dns==8.0.0 +azure-mgmt-marketplaceordering==1.1.0 +azure-mgmt-monitor==3.0.0 +azure-mgmt-managedservices==6.0.0 +azure-mgmt-managementgroups==1.0.0 +azure-mgmt-network==19.1.0 +azure-mgmt-nspkg==2.0.0 +azure-mgmt-privatedns==1.0.0 +azure-mgmt-redis==13.0.0 +azure-mgmt-resource==21.1.0 +azure-mgmt-rdbms==10.0.0 +azure-mgmt-search==8.0.0 +azure-mgmt-servicebus==7.1.0 +azure-mgmt-sql==3.0.1 +azure-mgmt-storage==19.0.0 +azure-mgmt-trafficmanager==1.0.0b1 +azure-mgmt-web==6.1.0 +azure-nspkg==2.0.0 +azure-storage-blob==12.11.0 +msrest==0.7.1 +msrestazure==0.6.4 +azure-keyvault==1.1.0 +azure-mgmt-keyvault==10.0.0 +azure-graphrbac==0.61.1 +azure-mgmt-cosmosdb==6.4.0 +azure-mgmt-hdinsight==9.0.0 +azure-mgmt-devtestlabs==9.0.0 +azure-mgmt-loganalytics==12.0.0 +azure-mgmt-automation==1.0.0 +azure-mgmt-iothub==2.2.0 +azure-mgmt-recoveryservices==2.0.0 +azure-mgmt-recoveryservicesbackup==3.0.0 +azure-mgmt-notificationhubs==7.0.0 +azure-mgmt-eventhub==10.1.0 diff --git 
a/ansible_collections/azure/azcollection/sanity-requirements-azure.txt b/ansible_collections/azure/azcollection/sanity-requirements-azure.txt new file mode 100644 index 000000000..f879956da --- /dev/null +++ b/ansible_collections/azure/azcollection/sanity-requirements-azure.txt @@ -0,0 +1,5 @@ +voluptuous==0.13.1 +pycodestyle==2.8.0 +yamllint==1.26.3 +cryptography==38.0.3 +pylint==2.13.5 diff --git a/ansible_collections/azure/azcollection/shippable.yml b/ansible_collections/azure/azcollection/shippable.yml new file mode 100644 index 000000000..a93819d5f --- /dev/null +++ b/ansible_collections/azure/azcollection/shippable.yml @@ -0,0 +1,40 @@ +language: python + +env: + matrix: + - T=none + +matrix: + exclude: + - env: T=none + include: + - env: T=sanity + + - env: T=azure/2.7/1 + - env: T=azure/3.6/1 + + - env: T=azure/2.7/2 + - env: T=azure/3.6/2 + + - env: T=azure/2.7/5 + - env: T=azure/3.6/5 + + - env: T=azure/2.7/7 + - env: T=azure/3.6/7 +branches: + except: + - "*-patch-*" + - "revert-*-*" + +build: + ci: + - tests/utils/shippable/timing.sh tests/utils/shippable/shippable.sh $T + +integrations: + notifications: + - integrationName: email + type: email + on_success: never + on_failure: never + on_start: never + on_pull_request: never diff --git a/ansible_collections/azure/azcollection/tests/config.yml b/ansible_collections/azure/azcollection/tests/config.yml new file mode 100644 index 000000000..ecfdc9425 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/config.yml @@ -0,0 +1,2 @@ +modules: + python_requires: '>= 3.6' \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/aliases new file mode 100644 index 000000000..5052c82c7 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/aliases @@ -0,0 +1 @@ +cloud/azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/tasks/main.yml new file mode 100644 index 000000000..5c010972f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_account_info/tasks/main.yml @@ -0,0 +1,17 @@ +- name: Get facts for current logged in user + azure.azcollection.azure_rm_account_info: + register: result + +- assert: + that: + - result is not changed + - result is not failed + - "'account_info' in result" + - "'environmentName' in result.account_info" + - "'homeTenantId' in result.account_info" + - "'id' in result.account_info" + - "'managedByTenants' in result.account_info" + - "'name' in result.account_info" + - "'state' in result.account_info" + - "'tenantId' in result.account_info" + - "'user' in result.account_info" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/aliases new file mode 100644 index 000000000..99b41786a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/aliases @@ -0,0 +1,4 @@ +cloud/azure +destructive +unsupported +shippable/azure/group10 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/meta/main.yml new file mode 
100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/tasks/main.yml new file mode 100644 index 000000000..a35871c09 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_acs/tasks/main.yml @@ -0,0 +1,149 @@ + - name: Create an ACS instance - DCOS + azure_rm_acs: + name: "acsdcos{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus + orchestration_platform: DCOS + master_profile: + - count: 1 + dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(10, True, '') }}" + vm_size: Standard_A0 + linux_profile: + - admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com + agent_pool_profiles: + - name: default + count: 1 + dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(12, True, '') }}" + vm_size: Standard_A0 + diagnostics_profile: false + register: output + + - name: Assert the ACS instance is well created + assert: + that: + - output.changed + - output.state.provisioning_state == 'Succeeded' + + - name: Scale the ACS instance from 1 to 2 - DCOS + azure_rm_acs: + name: "acsdcos{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus + orchestration_platform: DCOS + master_profile: + - count: 1 + dns_prefix: "acsdcos{{ 
resource_group | hash('md5') | truncate(10, True, '') }}" + vm_size: Standard_A0 + linux_profile: + - admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com + agent_pool_profiles: + - name: default + count: 2 + dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(12, True, '') }}" + vm_size: Standard_A0 + diagnostics_profile: false + register: output + + - name: Assert the ACS instance is well scaled + assert: + that: + - output.changed + - output.state.agent_pool_profiles[0].count == 2 + + - name: Delete the DCOS ACS instance - DCOS + azure_rm_acs: + name: "acsdcos{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus + orchestration_platform: DCOS + state: absent + master_profile: + - count: 1 + dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(10, True, '') }}" + vm_size: Standard_A0 + linux_profile: + - admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com + agent_pool_profiles: + - name: default + count: 2 + dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(12, True, '') }}" + vm_size: Standard_A0 + diagnostics_profile: false + + - name: Create an ACS instance - Swarm + azure_rm_acs: + name: "acssw{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group 
}}" + location: eastus + orchestration_platform: Swarm + master_profile: + - count: 1 + dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(10, True, '') }}" + vm_size: Standard_A0 + linux_profile: + - admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com + agent_pool_profiles: + - name: default + count: 1 + dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(12, True, '') }}" + vm_size: Standard_A0 + diagnostics_profile: false + register: output + + - name: Assert the ACS instance is well created + assert: + that: + - output.changed + - output.state.provisioning_state == 'Succeeded' + + - name: Scale the ACS instance from 1 to 2 - Swarm + azure_rm_acs: + name: "acssw{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus + orchestration_platform: Swarm + master_profile: + - count: 1 + dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(10, True, '') }}" + vm_size: Standard_A0 + linux_profile: + - admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com + agent_pool_profiles: + - name: default + count: 2 + dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(12, True, '') }}" + vm_size: Standard_A0 + diagnostics_profile: false + register: output + + - name: Assert the ACS instance is well scaled + 
assert: + that: + - output.changed + - output.state.agent_pool_profiles[0].count == 2 + + - name: Delete the ACS instance - Swarm + azure_rm_acs: + name: "acssw{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus + orchestration_platform: Swarm + state: absent + master_profile: + - count: 1 + dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(10, True, '') }}" + vm_size: Standard_A0 + linux_profile: + - admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com + agent_pool_profiles: + - name: default + count: 2 + dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(12, True, '') }}" + vm_size: Standard_A0 + diagnostics_profile: false \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/aliases new file mode 100644 index 000000000..fc8bf1e71 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group10 +disabled +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml new file mode 100644 index 000000000..c8dfcb2b7 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml @@ -0,0 +1,91 @@ +- set_fact: + tenant_id: "{{ azure_tenant }}" + display_name: "app{{ resource_group | hash('sha1') | truncate(20, True, '') }}" + run_once: yes + +- name: Create application + azure_rm_adapplication: + tenant: "{{ tenant_id }}" + display_name: "{{ display_name }}" + register: create_output + +- assert: + that: create_output.changed + +- name: Create application again (idempotent test) + azure_rm_adapplication: + tenant: "{{ tenant_id }}" + app_id: "{{ create_output.app_id }}" + register: output + +- assert: + that: not output.changed + +- name: Create application with more parameter + azure_rm_adapplication: + tenant: "{{ tenant_id }}" + display_name: "{{ display_name }}-01" + available_to_other_tenants: False + credential_description: "for test" + end_date: 2021-10-01 + start_date: 2021-05-18 + identifier_uris: + - "{{ display_name }}.com" + app_roles: + - allowed_member_types: + - User + description: "for app role test" + display_name: "{{ display_name }}_approle" + is_enabled: True + value: Password@0329 + register: second_output + +- assert: + that: second_output.changed + +- name: get ad app info ---- by object ID + azure_rm_adapplication_info: + object_id: "{{ create_output.object_id }}" + tenant: "{{ tenant_id }}" + register: output + +- name: get ad app info ---- by app ID + azure_rm_adapplication_info: + app_id: "{{ create_output.app_id }}" + tenant: "{{ tenant_id }}" + register: output + +- assert: + that: + - output.applications[0].app_display_name == "{{ display_name }}" + - output.applications | length == 1 + +- name: delete ad app by app id + 
azure_rm_adapplication: + app_id: "{{ create_output.app_id }}" + tenant: "{{ tenant_id }}" + state: absent + register: output + +- assert: + that: output.changed + +- name: delete ad app by app id + azure_rm_adapplication: + app_id: "{{ second_output.app_id }}" + tenant: "{{ tenant_id }}" + state: absent + register: output + +- assert: + that: output.changed + +- name: get ad app info ---- by app id + azure_rm_adapplication_info: + app_id: "{{ create_output.app_id }}" + tenant: "{{ tenant_id }}" + register: output + +- assert: + that: + - output.applications | length == 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/aliases new file mode 100644 index 000000000..fc8bf1e71 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group10 +disabled +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/tasks/main.yml new file mode 100644 index 000000000..09b3f686e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adgroup/tasks/main.yml @@ -0,0 +1,239 @@ +- set_fact: + tenant_id: "{{ azure_tenant }}" + resource_prefix: "{{ 999999999999999999994 | random | to_uuid }}" + run_once: yes + +- name: Try to return non-existent group using 
display name + azure_rm_adgroup_info: + attribute_name: "displayName" + attribute_value: "{{ resource_prefix }}-Group-Root" + tenant: "{{ tenant_id }}" + register: get_nonexistent_group_display_name_ShouldFail + failed_when: + - get_nonexistent_group_display_name_ShouldFail.ad_groups != [] + +- name: Create Group Root + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + display_name: "{{ resource_prefix }}-Group-Root" + mail_nickname: "{{ resource_prefix }}-Group-Root" + state: 'present' + register: group_create_changed_ShouldPass + +- name: Create Group Should Return Not Changed + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + display_name: "{{ resource_prefix }}-Group-Root" + mail_nickname: "{{ resource_prefix }}-Group-Root" + state: 'present' + register: group_create_unchanged_ShouldPass + +- name: Assert Otherwise Changed Returns are Equal + assert: + that: + - group_create_changed_ShouldPass.changed == True + - group_create_unchanged_ShouldPass.changed == False + - group_create_changed_ShouldPass.display_name == group_create_unchanged_ShouldPass.display_name + - group_create_changed_ShouldPass.mail_enabled == group_create_unchanged_ShouldPass.mail_enabled + - group_create_changed_ShouldPass.mail_nickname == group_create_unchanged_ShouldPass.mail_nickname + - group_create_changed_ShouldPass.object_id == group_create_unchanged_ShouldPass.object_id + - group_create_changed_ShouldPass.security_enabled == group_create_unchanged_ShouldPass.security_enabled + +- name: Return previously created group using object_id + azure_rm_adgroup_info: + object_id: "{{ group_create_unchanged_ShouldPass.object_id }}" + tenant: "{{ tenant_id }}" + register: get_created_object_id_ShouldPass + +- name: Assert Returns are Equal to Created Group + assert: + that: + - get_created_object_id_ShouldPass.ad_groups[0].object_id == group_create_unchanged_ShouldPass.object_id + +- name: Create Group Member 1 + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + display_name: "{{ resource_prefix 
}}-Group-Member-1" + mail_nickname: "{{ resource_prefix }}-Group-Member-1" + state: 'present' + register: create_group_member_1_ShouldPass + +- name: Create Group Member 2 + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + display_name: "{{ resource_prefix }}-Group-Member-2" + mail_nickname: "{{ resource_prefix }}-Group-Member-2" + state: 'present' + register: create_group_member_2_ShouldPass + +- name: Ensure member is in group using display_name and mail_nickname + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + display_name: "{{ resource_prefix }}-Group-Root" + mail_nickname: "{{ resource_prefix }}-Group-Root" + state: 'present' + present_members: + - "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ create_group_member_1_ShouldPass.object_id }}" + - "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ create_group_member_2_ShouldPass.object_id }}" + register: add_members_to_group_ShouldPass + +- name: Validate members are in the group + assert: + that: + - add_members_to_group_ShouldPass.group_members[0].object_id == create_group_member_1_ShouldPass.object_id or add_members_to_group_ShouldPass.group_members[1].object_id == create_group_member_1_ShouldPass.object_id + - add_members_to_group_ShouldPass.group_members[1].object_id == create_group_member_2_ShouldPass.object_id or add_members_to_group_ShouldPass.group_members[0].object_id == create_group_member_2_ShouldPass.object_id + +- name: Ensure member is in group that is already present using object_id + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + state: 'present' + present_members: + - "https://graph.windows.net/{{ tenant_id }}/directoryObjects/{{ create_group_member_1_ShouldPass.object_id }}" + register: add_already_present_member_to_group_ShouldPass + +- name: Validate nothing changed from already present member + assert: + that: + - add_already_present_member_to_group_ShouldPass.changed == false + +- name: Ensure 
member is not in group using object_id + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + state: 'present' + absent_members: + - "{{ create_group_member_2_ShouldPass.object_id }}" + register: remove_member_from_group_ShouldPass + +- name: Validate Group Member 1 is in the group and Group Member 2 is not + assert: + that: + - remove_member_from_group_ShouldPass.group_members[0].object_id == create_group_member_1_ShouldPass.object_id + - remove_member_from_group_ShouldPass.group_members | length == 1 + +- name: Ensure member is not in group that is already not in group using display_name and mail_nickname + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + display_name: "{{ resource_prefix }}-Group-Root" + mail_nickname: "{{ resource_prefix }}-Group-Root" + state: 'present' + absent_members: + - "{{ create_group_member_2_ShouldPass.object_id }}" + register: remove_already_absent_member_from_group_ShouldPass + +- name: Validate nothing changed from already absent member + assert: + that: + - remove_already_absent_member_from_group_ShouldPass.changed == false + +- name: Return a specific group using object_id + azure_rm_adgroup_info: + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + tenant: "{{ tenant_id }}" + register: object_id_ShouldPass + +- name: Return a specific group using object_id and return_owners + azure_rm_adgroup_info: + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + return_owners: True + tenant: "{{ tenant_id }}" + register: object_id_return_owners_ShouldPass + +- name: Return a specific group using object_id and return_owners and return_group_members + azure_rm_adgroup_info: + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + return_owners: True + return_group_members: True + tenant: "{{ tenant_id }}" + register: object_id_return_owners_and_group_members_ShouldPass + +- name: Return a specific group using object_id and member_groups + 
azure_rm_adgroup_info: + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + return_member_groups: True + tenant: "{{ tenant_id }}" + register: object_id_return_member_groups_ShouldPass + +- name: Return a specific group using object_id and check_membership + azure_rm_adgroup_info: + object_id: "{{ group_create_changed_ShouldPass.object_id }}" + check_membership: "{{ create_group_member_1_ShouldPass.object_id }}" + tenant: "{{ tenant_id }}" + register: object_id_return_check_membership_ShouldPass + +- name: Return a specific group using displayName attribute + azure_rm_adgroup_info: + attribute_name: "displayName" + attribute_value: "{{ group_create_changed_ShouldPass.display_name }}" + tenant: "{{ tenant_id }}" + register: displayName_attribute_ShouldPass + +- name: Return a specific group using mailNickname filter + azure_rm_adgroup_info: + odata_filter: "mailNickname eq '{{ group_create_changed_ShouldPass.mail_nickname }}'" + tenant: "{{ tenant_id }}" + register: mailNickname_filter_ShouldPass + +- name: Return a different group using displayName attribute + azure_rm_adgroup_info: + attribute_name: "displayName" + attribute_value: "{{ create_group_member_2_ShouldPass.display_name }}" + tenant: "{{ tenant_id }}" + register: displayName_attribute_different_ShouldPass + +- name: Assert All Returns Are Equal + assert: + that: + - object_id_ShouldPass == displayName_attribute_ShouldPass + - object_id_ShouldPass == mailNickname_filter_ShouldPass + +- name: Assert Returns Are Not Equal + assert: + that: + - object_id_ShouldPass != displayName_attribute_different_ShouldPass + +- name: Delete group Group Root on object_id + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + object_id: "{{ group_create_unchanged_ShouldPass.object_id }}" + state: 'absent' + register: group_delete_group_root_ShouldPass + +- name: Try to return now deleted group Group Root using object_id + azure_rm_adgroup_info: + object_id: "{{ group_create_unchanged_ShouldPass.object_id }}" + 
tenant: "{{ tenant_id }}" + register: get_deleted_object_group_root_ShouldFail + failed_when: + - '"failed to get ad group info Resource" not in get_deleted_object_group_root_ShouldFail.msg' + +- name: Delete group Group Member 1 on object_id + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + object_id: "{{ create_group_member_1_ShouldPass.object_id }}" + state: 'absent' + register: group_delete_group_member_1_ShouldPass + +- name: Try to return now deleted group Group Member 1 using object_id + azure_rm_adgroup_info: + object_id: "{{ create_group_member_1_ShouldPass.object_id }}" + tenant: "{{ tenant_id }}" + register: get_deleted_object_group_member_1_ShouldFail + failed_when: + - '"failed to get ad group info Resource" not in get_deleted_object_group_member_1_ShouldFail.msg' + +- name: Delete group Group Member 2 on object_id + azure_rm_adgroup: + tenant: "{{ tenant_id }}" + object_id: "{{ create_group_member_2_ShouldPass.object_id }}" + state: 'absent' + register: group_delete_group_member_2_ShouldPass + +- name: Try to return now deleted group Group Member 2 using object_id + azure_rm_adgroup_info: + object_id: "{{ create_group_member_2_ShouldPass.object_id }}" + tenant: "{{ tenant_id }}" + register: get_deleted_object_group_member_2_ShouldFail + failed_when: + - '"failed to get ad group info Resource" not in get_deleted_object_group_member_2_ShouldFail.msg' diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/aliases new file mode 100644 index 000000000..fc8bf1e71 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group10 +disabled +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/meta/main.yml 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/tasks/main.yml new file mode 100644 index 000000000..7c3782f17 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adpassword/tasks/main.yml @@ -0,0 +1,103 @@ +- set_fact: + app_id: "e0a62513-1d81-480e-a6dc-5c99cdd58d9a" + tenant_id: "72f988bf-86f1-41af-91ab-2d7cd011db47" + app_object_id: "b52e8275-a1ee-4c4a-86ff-15992e0920ed" + +- name: create first ad password by app_id + azure_rm_adpassword: + app_id: "{{ app_id }}" + value: "Password@032900001" + tenant: "{{ tenant_id }}" + state: present + register: ad_fact + +- assert: + that: + - ad_fact.changed + +- name: create second ad password by app_object_id + azure_rm_adpassword: + value: "Password@032900002" + tenant: "{{ tenant_id }}" + app_object_id: "{{ app_object_id }}" + state: present + register: ad_fact02 + +- assert: + that: + - ad_fact02.changed + +- name: create ad service principal + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + state: present + +- name: Get ad service principal info + azure_rm_adserviceprincipal_info: + tenant: "{{ tenant_id }}" + app_id: "{{ app_id }}" + register: sp_info +- debug: + var: sp_info + +- name: create third ad password by service_principal_object_id + azure_rm_adpassword: + value: "Password@032900003" + tenant: "{{ tenant_id }}" + service_principal_object_id: "{{ sp_info.service_principals[0].object_id }}" + state: present + register: ad_fact03 + +- assert: + that: + - ad_fact03.changed + 
+- name: can't update ad password + azure_rm_adpassword: + app_id: "{{ app_id }}" + value: "Password@032900003" + tenant: "{{ tenant_id }}" + key_id: "{{ ad_fact.key_id }}" + app_object_id: "{{ app_object_id }}" + state: present + register: output + ignore_errors: True + +- name: Get ad password info + azure_rm_adpassword_info: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + key_id: "{{ ad_fact.key_id }}" + app_object_id: "{{ app_object_id }}" + register: ad_info + +- assert: + that: + - ad_info.passwords[0].start_date == ad_fact.start_date + - ad_info.passwords[0].end_date == ad_fact.end_date + +- name: delete one ad password + azure_rm_adpassword: + app_id: "{{ app_id }}" + key_id: "{{ ad_fact.key_id }}" + tenant: "{{ tenant_id }}" + app_object_id: "{{ app_object_id }}" + state: absent + register: output + +- assert: + that: + - output.changed + +- name: delete all ad password + azure_rm_adpassword: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + app_object_id: "{{ app_object_id }}" + state: absent + register: output + +- assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/aliases new file mode 100644 index 000000000..6feba04aa --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group10 +destructive +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - 
setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/tasks/main.yml new file mode 100644 index 000000000..4e4b50161 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adserviceprincipal/tasks/main.yml @@ -0,0 +1,72 @@ +- set_fact: + app_id: "e0a62513-1d81-480e-a6dc-5c99cdd58d9a" + tenant_id: "72f988bf-86f1-41af-91ab-2d7cd011db47" + +- name: delete ad service principal + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + state: absent + +- name: create ad service principal + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + state: present + register: ad_fact + +- assert: + that: + - ad_fact.changed + +- name: create ad service principal (idempotent) + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + state: present + register: output + +- assert: + that: + - not output.changed + +- name: Get ad service principal info by app_id + azure_rm_adserviceprincipal_info: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + register: ad_info + +- assert: + that: + - ad_info.service_principals[0].app_display_name == ad_fact.app_display_name + - ad_info.service_principals[0].app_role_assignment_required == False + +- name: update ad service principal app_role_assignmentrequired to True + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + app_role_assignment_required: True + state: present + register: output + +- name: Get ad service principal info by object_id + azure_rm_adserviceprincipal_info: + tenant: "{{ tenant_id }}" + object_id: "{{ ad_info.service_principals[0].object_id }}" + register: ad_info + +- assert: + that: + - ad_info.service_principals[0].app_display_name == ad_fact.app_display_name + -
ad_info.service_principals[0].app_role_assignment_required == True + +- name: delete ad service principal + azure_rm_adserviceprincipal: + app_id: "{{ app_id }}" + tenant: "{{ tenant_id }}" + state: absent + register: output + +- assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/aliases new file mode 100644 index 000000000..398866a2f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group1 +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml new file mode 100644 index 000000000..98dfc3241 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml @@ -0,0 +1,165 @@ +- name: Prepare facts + set_fact: + tenant_id: "{{ azure_tenant }}" + user_id: "user{{ 999999999999999999994 | random | to_uuid }}@contoso.com" + object_id: "{{ 999999999999999999994 | random | to_uuid }}" + user_principal_name: "{{ 999999999999999999994 | random | to_uuid }}" + run_once: yes + +- name: Create test user + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "present" + account_enabled: "True" + display_name: "Test_{{ user_principal_name }}_Display_Name" + 
password_profile: "password" + mail_nickname: "Test_{{ user_principal_name }}_mail_nickname" + immutable_id: "{{ object_id }}" + given_name: "First" + surname: "Last" + user_type: "Member" + usage_location: "US" + mail: "{{ user_principal_name }}@contoso.com" + register: create_user_should_pass + +- name: Try to update existing user - idempotent check + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "present" + display_name: "Test_{{ user_principal_name }}_Display_Name" + mail_nickname: "Test_{{ user_principal_name }}_mail_nickname" + given_name: "First" + surname: "Last" + mail: "{{ user_principal_name }}@contoso.com" + register: attempted_update_with_no_changes_should_pass + +- name: Assert Nothing Changed + assert: + that: + - attempted_update_with_no_changes_should_pass["changed"] == False + +- name: user_principal_name Should Pass + azure_rm_aduser_info: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + register: get_user_should_pass + +- name: Assert user was created and account is enabled + assert: + that: + - "create_user_should_pass['ad_users'][0]['account_enabled'] == True" + - "get_user_should_pass['ad_users'][0]['account_enabled'] == True" + +- name: Update test user + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "present" + account_enabled: "False" + register: update_user_should_pass + +- name: user_principal_name on updated user Should Pass + azure_rm_aduser_info: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + register: get_updated_user_should_pass + +- name: Assert user was updated and account is disabled + assert: + that: + - "update_user_should_pass['ad_users'][0]['account_enabled'] == False" + - "get_updated_user_should_pass['ad_users'][0]['account_enabled'] == False" + +- name: Delete test user + azure_rm_aduser: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + state: "absent" + register: 
delete_user_should_pass + +- name: user_principal_name Should Fail + azure_rm_aduser_info: + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + register: get_user_should_fail + ignore_errors: true + +- name: Assert task failed + assert: + that: + - "get_user_should_fail['failed'] == True" + +- name: Run with bad tenant Should Fail + azure_rm_aduser_info: + user_principal_name: "{{user_id}}" + tenant: None + register: missing_tenant + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "missing_tenant['failed'] == True" + +- name: Missing any identifiers Should Fail + azure_rm_aduser_info: + tenant: "{{ tenant_id }}" + register: missing_any_identifiers + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "missing_any_identifiers is undefined" + +- name: Too many identifiers Should Fail + azure_rm_aduser_info: + user_principal_name: "{{ user_id }}" + object_id: "{{ object_id }}" + tenant: "{{ tenant_id }}" + register: too_many_identifiers + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "too_many_identifiers is undefined" + +- name: Missing attribute_value Should Fail + azure_rm_aduser_info: + attribute_name: proxyAddresses + tenant: "{{ tenant_id }}" + register: missing_attribute_value + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "attribute_value is undefined" + +- name: Missing attribute_name Should Fail + azure_rm_aduser_info: + attribute_value: SMTP:user@contoso.com + tenant: "{{ tenant_id }}" + register: missing_attribute_name + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "attribute_name is undefined" + +- name: Using all with principal name should fail + azure_rm_aduser_info: + all: True + user_principal_name: "{{ user_id }}" + tenant: "{{ tenant_id }}" + register: using_all_with_principal_name + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "using_all_with_principal_name is undefined" diff --git
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/aliases new file mode 100644 index 000000000..7aca0ce4f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group11 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml new file mode 100644 index 000000000..cf3f856ec --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml @@ -0,0 +1,572 @@ + - set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}" + noderpfx: "{{ resource_group | hash('md5') | truncate(4, True, '') }}" + + - include: minimal-cluster.yml + + - name: Find available k8s version + azure_rm_aksversion_info: + location: eastus + register: versions + + - name: Create an AKS instance (check mode) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable"} + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + check_mode: yes + + - name: Check there is no AKS created + azure_rm_aks_info: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: fact + + - name: Check there is no AKS created + assert: + that: + - "fact.aks | length == 0" + + - name: Create an AKS instance + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable"} + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + enable_rbac: 
yes + network_profile: + load_balancer_sku: standard + register: output + + - name: Assert the AKS instance is well created + assert: + that: + - output.changed + - output.provisioning_state == 'Succeeded' + + - name: Get AKS fact + azure_rm_aks_info: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: fact + + - name: Assert fact returns the created one + assert: + that: + - "fact.aks | length == 1" + - fact.aks[0].id == output.id + - fact.aks[0].properties.agentPoolProfiles[0].availabilityZones == ["1", "2"] + - fact.aks[0].properties.agentPoolProfiles[0].mode == "System" + - fact.aks[0].properties.agentPoolProfiles[0].nodeLabels | length == 1 + + - name: Update an AKS instance node_labels + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable", "environment":"dev"} + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + + - name: Assert the AKS instance is well update + assert: + that: + - output.changed + + - name: Get AKS fact + azure_rm_aks_info: + name: "aks{{ rpfx }}" + resource_group: 
"{{ resource_group }}" + register: fact + + - name: Assert fact returns the created one + assert: + that: + - "fact.aks | length == 1" + - fact.aks[0].id == output.id + - fact.aks[0].properties.agentPoolProfiles[0].nodeLabels | length == 2 + + - name: Get AKS upgrade versions + azure_rm_aksupgrade_info: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: upgrades + + - name: Assert available control-plane versions for upgrade + assert: + that: + - "upgrades.azure_aks_upgrades.control_plane_profile.kubernetes_version == versions.azure_aks_versions[0]" + - "upgrades.azure_aks_upgrades.control_plane_profile.upgrades | length > 0" + + - name: Create an AKS instance (idempotent) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + + - name: Assert idempotent + assert: + that: + - not output.changed + + - name: Get available version + azure_rm_aksversion_info: + location: eastus + version: "{{ versions.azure_aks_versions[0] }}" + register: version1 + + - name: Upgrade the AKS instance with addon + azure_rm_aks: + 
name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + addon: + http_application_routing: {} + network_profile: + network_plugin: kubenet + load_balancer_sku: standard + enable_rbac: yes + register: output + + - name: Assert the AKS instance is upgraded + assert: + that: + - output.changed + - output.kubernetes_version == version1.azure_aks_versions[0] + - output.addon.httpApplicationRouting.enabled == True + - output.agent_pool_profiles[0].count == 1 + - output.network_profile.network_plugin == 'kubenet' + + - name: Upgrade the AKS instance with addon (idempontent) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + addon: + http_application_routing: {} + network_profile: + network_plugin: kubenet + load_balancer_sku: standard + enable_rbac: yes + register: output + + - assert: + that: + - not output.changed + + - name: Upgrade the AKS instance with agent pool profiles + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + enable_auto_scaling: True + max_count: 6 + min_count: 1 + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + ignore_errors: yes + + - name: Assert 
the AKS instance is well created + assert: + that: + - output.changed + - output.provisioning_state == 'Succeeded' + ignore_errors: yes + + - name: Upgrade the AKS instance with agent pool profiles (idempontent) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + enable_auto_scaling: True + max_count: 6 + min_count: 1 + max_pods: 42 + availability_zones: + - 1 + - 2 + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + + - name: Assert the AKS instance is well created + assert: + that: + - not output.changed + + - name: Upgrade the AKS instance with multiple agent pool profiles + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + enable_auto_scaling: True + max_count: 6 + min_count: 1 + max_pods: 42 + availability_zones: + - 1 + - 2 + - name: default2 + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: User + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + ignore_errors: yes + + - name: Assert the AKS instance is well created + assert: + that: + - output.changed + - "output.agent_pool_profiles | length == 2" + - output.provisioning_state == 'Succeeded' + - output.agent_pool_profiles[1].mode == 'User' + ignore_errors: yes + + - name: Upgrade the AKS instance with multiple agent pool profiles (idempontent) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + 
vm_size: Standard_B2s + type: VirtualMachineScaleSets + enable_auto_scaling: True + max_count: 6 + min_count: 1 + max_pods: 42 + availability_zones: + - 1 + - 2 + - name: default2 + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: User + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + + - name: Assert the AKS instance is well created + assert: + that: + - not output.changed + + - name: Update the default2 agent_pool mode from User to System + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + enable_auto_scaling: True + max_count: 6 + min_count: 1 + max_pods: 42 + availability_zones: + - 1 + - 2 + - name: default2 + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + ignore_errors: yes + register: output + + - name: Assert the AKS instance is well created + assert: + that: + - output.changed + - "output.agent_pool_profiles | length == 2" + - output.provisioning_state == 'Succeeded' + - output.agent_pool_profiles[1].mode == 
'System' + ignore_errors: yes + + - name: Update the default2 agent_pool mode from User to System (idempontent) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus + dns_prefix: "aks{{ rpfx }}" + kubernetes_version: "{{ version1.azure_aks_versions[0] }}" + service_principal: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + linux_profile: + admin_username: azureuser + ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + enable_auto_scaling: True + max_count: 6 + min_count: 1 + max_pods: 42 + availability_zones: + - 1 + - 2 + - name: default2 + count: 1 + vm_size: Standard_B2s + type: VirtualMachineScaleSets + mode: System + node_resource_group: "node{{ noderpfx }}" + enable_rbac: yes + network_profile: + load_balancer_sku: standard + register: output + + - name: Get AKS fact + azure_rm_aks_info: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + show_kubeconfig: user + register: fact + + - name: Assert fact returns the created one + assert: + that: + - "fact.aks | length == 1" + - fact.aks[0].kube_config == output.kube_config + + - name: Delete the AKS instance + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + + - name: Assert the AKS instance is well deleted + assert: + that: + - output.changed + + - name: Delete the AKS instance (idempotent) + azure_rm_aks: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + register: output 
+ + - name: Assert idempotent + assert: + that: + - not output.changed + + - name: Get AKS fact + azure_rm_aks_info: + name: "aks{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: fact + + - name: Assert fact returns empty + assert: + that: + - "fact.aks | length == 0" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml new file mode 100644 index 000000000..bda3c06ec --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml @@ -0,0 +1,136 @@ +- set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}" + +- name: Find available k8s version + azure_rm_aksversion_info: + location: eastus + register: versions + +- name: Use minimal parameters and system-assigned identity + azure_rm_aks: + name: "minimal{{ rpfx }}" + location: eastus + resource_group: "{{ resource_group }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + dns_prefix: "aks{{ rpfx }}" + enable_rbac: true + aad_profile: + managed: true + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + mode: System + api_server_access_profile: + authorized_ip_ranges: + - "192.0.2.0" + - "198.51.100.0" + - "203.0.113.0" + enable_private_cluster: no + network_profile: + load_balancer_sku: standard + outbound_type: loadBalancer + register: output + +- name: Assert the AKS instance is well created + assert: + that: + - output.changed + - output.provisioning_state == 'Succeeded' + +- name: Get AKS fact + azure_rm_aks_info: + name: "minimal{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: fact + +- name: Assert fact returns the created one + assert: + that: + - "fact.aks | length == 1" + - fact.aks[0].id == output.id + - fact.aks[0].properties.aadProfile.managed == true + +- name: Use minimal parameters and 
system-assigned identity (idempotent) + azure_rm_aks: + name: "minimal{{ rpfx }}" + location: eastus + resource_group: "{{ resource_group }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + dns_prefix: "aks{{ rpfx }}" + enable_rbac: true + aad_profile: + managed: true + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + mode: System + api_server_access_profile: + authorized_ip_ranges: + - "192.0.2.0" + - "198.51.100.0" + - "203.0.113.0" + enable_private_cluster: no + network_profile: + load_balancer_sku: standard + outbound_type: loadBalancer + register: output + +- name: Assert idempotent + assert: + that: + - not output.changed + +- name: Update api_server_access_profile config + azure_rm_aks: + name: "minimal{{ rpfx }}" + location: eastus + resource_group: "{{ resource_group }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + dns_prefix: "aks{{ rpfx }}" + enable_rbac: true + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + mode: System + api_server_access_profile: + authorized_ip_ranges: + - "173.0.113.0" + - "192.0.2.0" + - "198.51.100.0" + - "203.0.113.0" + enable_private_cluster: no + network_profile: + load_balancer_sku: standard + outbound_type: loadBalancer + register: output + +- name: Assert idempotent + assert: + that: + - output.changed + +- name: Delete the AKS instance + azure_rm_aks: + name: "minimal{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- name: Assert the AKS instance is well deleted + assert: + that: + - output.changed + +- name: Get AKS fact + azure_rm_aks_info: + name: "minimal{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: fact + +- name: Assert fact returns empty + assert: + that: + - "fact.aks | length == 0" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/aliases new file mode 100644 index 000000000..7aca0ce4f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group11 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml new file mode 100644 index 000000000..1d098e7f1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml @@ -0,0 +1,172 @@ +- set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}" + +- name: Find available k8s version + azure_rm_aksversion_info: + location: eastus + register: versions + +- name: Create a kubernet service with minimal parameters + azure_rm_aks: + name: "min{{ rpfx }}" + location: eastus + resource_group: "{{ resource_group }}" + kubernetes_version: "{{ versions.azure_aks_versions[0] }}" + dns_prefix: "aks{{ rpfx }}" + enable_rbac: true + aad_profile: + managed: true + agent_pool_profiles: + - name: default + count: 1 + vm_size: Standard_B2s + mode: System + api_server_access_profile: + authorized_ip_ranges: + - "192.0.2.0" + - "198.51.100.0" + - "203.0.113.0" + enable_private_cluster: no + network_profile: + load_balancer_sku: standard + outbound_type: loadBalancer + register: output + +- name: Get 
cluster's node agent pool info + azure_rm_aksagentpool_info: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + register: output + +- name: Assert the cluster with one agent pool + assert: + that: + - "output.aks_agent_pools | length == 1" + +- name: Get aks agent pool support version + azure_rm_aksagentpoolversion_info: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + register: agentpool_version + +- name: Assert the orchestrator version is not None + assert: + that: + - "agentpool_version.azure_orchestrator_version | length >= 1" + +- name: Add agent pool + azure_rm_aksagentpool: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + name: default03 + count: 1 + vm_size: Standard_B2s + type_properties_type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable"} + max_pods: 42 + enable_auto_scaling: True + min_count: 1 + max_count: 10 + orchestrator_version: "{{ agentpool_version.azure_orchestrator_version[0] }}" + availability_zones: + - 1 + - 2 + register: output + +- name: Assert the node agent pool created + assert: + that: + - output.changed + +- name: Add agent pool (Idempotent test) + azure_rm_aksagentpool: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + name: default03 + count: 1 + vm_size: Standard_B2s + type_properties_type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable"} + max_pods: 42 + enable_auto_scaling: True + min_count: 1 + max_count: 10 + orchestrator_version: "{{ agentpool_version.azure_orchestrator_version[0] }}" + availability_zones: + - 1 + - 2 + register: output + +- name: Assert the node agent pool not changed + assert: + that: + - not output.changed + +- name: Get cluster's node agent pool info + azure_rm_aksagentpool_info: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + register: output + +- name: Assert node agent pool + assert: + that: + - "output.aks_agent_pools | 
length == 2" + +- name: Upgrade node agent pool + azure_rm_aksagentpool: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + name: default03 + count: 1 + vm_size: Standard_B2s + type_properties_type: VirtualMachineScaleSets + mode: System + node_labels: {"release":"stable"} + max_pods: 42 + enable_auto_scaling: True + min_count: 2 + max_count: 20 + orchestrator_version: "{{ agentpool_version.azure_orchestrator_version[0] }}" + availability_zones: + - 1 + - 2 + register: output + +- name: Assert the node agent pool updated + assert: + that: + - output.changed + +- name: Get cluster's node agent pool info + azure_rm_aksagentpool_info: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + name: default03 + register: output + +- name: Assert node agent configuration + assert: + that: + - output.aks_agent_pools[0].availability_zones == [1, 2] + - output.aks_agent_pools[0].count == 1 + - output.aks_agent_pools[0].min_count == 2 + - output.aks_agent_pools[0].max_count == 20 + - output.aks_agent_pools[0].type_properties_type == "VirtualMachineScaleSets" + - output.aks_agent_pools[0].max_pods == 42 + - output.aks_agent_pools[0].orchestrator_version == agentpool_version.azure_orchestrator_version[0] + +- name: Delete node agent pool + azure_rm_aksagentpool: + resource_group: "{{ resource_group }}" + cluster_name: "min{{ rpfx }}" + name: default03 + state: absent + register: output + +- name: Assert the node agent pool has deleted + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/aliases new file mode 100644 index 000000000..5f6d48767 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group15 +destructive +disabled diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/tasks/main.yml new file mode 100644 index 000000000..72b5588c4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagement/tasks/main.yml @@ -0,0 +1,139 @@ +- name: Fix resource prefix + set_fact: + api_id: "myPolicy{{ resource_group | hash('md5') | truncate(23, True, '') }}" + service_name: "myService{{ resource_group | hash('md5') | truncate(22, True, '') }}" + display_name: test-api + path: myapipath + +- name: create API management service + azure_rm_apimanagementservice: + resource_group: "{{ resource_group }}" + name: "{{ service_name }}" + publisher_email: user@example.com + publisher_name: Username + sku_name: Developer + sku_capacity: 1 + +- name: Pause for 60 minutes to create API management + pause: + minutes: 60 + +- name: Create a new API instance + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}" + description: "testDescription" + display_name: "{{ display_name }}" + service_url: 'http://testapi.example.net/api' + path: "{{ path }}" + protocols: + - https + register: newApi + +- name: Assert that output has changed + assert: + that: + - newApi.changed == True + - newApi.failed == False + +- name: Create a new API instance(Idempotent) + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + 
service_name: "{{ service_name }}" + api_id: "{{ api_id }}" + description: "testDescription" + display_name: "{{ display_name }}" + service_url: 'http://testapi.example.net/api' + path: "{{ path }}" + protocols: + - https + register: newApi_idempotent + +- name: Assert that output has changed + assert: + that: + - newApi_idempotent.changed == False + - newApi_idempotent.failed == False + +- name: Update API's protocols + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}" + description: "testDescription" + display_name: "{{ display_name }}" + service_url: 'http://testapi.example.net/api' + path: "{{ path }}" + protocols: + - https + - http + register: updateApi + +- name: Assert that output has changed + assert: + that: + - updateApi.changed == True + +- name: Create different format api + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}-secondary" + path: "{{ path }}-secondary" + format: openapi + display_name: "{{ display_name }}-secondary" + protocols: + - https + register: newOpenApi + +- name: Assert that output has changed + assert: + that: + - newOpenApi.changed == True + - newOpenApi.failed == False + +- name: Get api information + azure_rm_apimanagement_info: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}" + register: output + +- name: Assert that output has changed + assert: + that: + - output.api.id != None + +- name: Delete an api + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}" + state: absent + register: deleteApi + +- name: Assert the changes + assert: + that: + - deleteApi.changed == True + +- name: Delete an api(Idempotent) + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}" + state: absent 
+ register: deleteApi_idempotent + +- name: Assert the changes + assert: + that: + - deleteApi_idempotent.changed == False + +- name: Delete an api + azure_rm_apimanagement: + resource_group: "{{ resource_group }}" + service_name: "{{ service_name }}" + api_id: "{{ api_id }}-secondary" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/aliases new file mode 100644 index 000000000..6feba04aa --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group10 +destructive +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/meta/main.yml new file mode 100644 index 000000000..48f5726d8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/tasks/main.yml new file mode 100644 index 000000000..0dab04af8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_apimanagementservice/tasks/main.yml @@ -0,0 +1,58 @@ +- name: Fix resource prefix + set_fact: + name: "myPolicy{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: To create API Management service + azure_rm_apimanagementservice: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + publisher_email: user@example.com + publisher_name: 
Username + sku_name: Developer + sku_capacity: 1 + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed == True + - output.failed == False + +- name: Recreate API Management service( Idempotent test) + azure_rm_apimanagementservice: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + publisher_email: user@example.com + publisher_name: Username + sku_name: Developer + sku_capacity: 1 + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed == False + - output.failed == False + +- name: Get api management service information + azure_rm_apimanagementservice_info: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + register: output + +- name: Assert that output has changed + assert: + that: + - output.api_management_service.name == "{{ name }}" + +- name: To delete an api management service + azure_rm_apimanagementservice: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + state: absent + register: output + +- name: Assert the changes + assert: + that: + - output.changed == True diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/aliases new file mode 100644 index 000000000..77e564784 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/aliases @@ -0,0 +1,4 @@ +cloud/azure +destructive +shippable/azure/group6 +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert1.txt b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert1.txt new file mode 100644 index 000000000..82a13f4ce --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert1.txt @@ -0,0 +1 @@ 
+MIIMAjCCCeqgAwIBAgITLQAAMpnXBx230XCKQgAAAAAymTANBgkqhkiG9w0BAQsFADCBizELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEVMBMGA1UECxMMTWljcm9zb2Z0IElUMR4wHAYDVQQDExVNaWNyb3NvZnQgSVQgVExTIENBIDUwHhcNMTcwNzIwMTc0NzA4WhcNMTkwNzEwMTc0NzA4WjAXMRUwEwYDVQQDEwx3d3cuYmluZy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6jsg+/7DlIrdgFOcaDlK3RQ9sIgkJsgpj+ZxAbIe3ziyimIxjVlHX87pqgXcNhaYNbCFD0iPm+aUfbv4GDTLR+AIr8eSegqxZ+CBToYM67NhpVYra1KAvY4XgqxorO4FB9IWYJRqhI3SZeZ3lLK5t9XuUMicG8l52nJfpPdXXvBca2wUCq8FHEObG81vJzESA0htLLPTjdUWBQnXPiW5bqzlGHzzv8ISV6jtDLNNa5JRlhSlXho+6pCedhNF7MP4yTaantPvAELLRWX13VhjgoCcRCCu0s8rxW5DuVWl2Pb2iw35MFnNWlcoVwq0AjAfGA+xEba/WLid6qfkQctYjAgMBAAGjggfQMIIHzDAdBgNVHQ4EFgQUCYflhSl4MCAls91+3GztpSmoA3AwCwYDVR0PBAQDAgSwMB8GA1UdIwQYMBaAFAj+JZ906ocEwry7jqg4XzPG0WxlMIGsBgNVHR8EgaQwgaEwgZ6ggZuggZiGS2h0dHA6Ly9tc2NybC5taWNyb3NvZnQuY29tL3BraS9tc2NvcnAvY3JsL01pY3Jvc29mdCUyMElUJTIwVExTJTIwQ0ElMjA1LmNybIZJaHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraS9tc2NvcnAvY3JsL01pY3Jvc29mdCUyMElUJTIwVExTJTIwQ0ElMjA1LmNybDCBhQYIKwYBBQUHAQEEeTB3MFEGCCsGAQUFBzAChkVodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpL21zY29ycC9NaWNyb3NvZnQlMjBJVCUyMFRMUyUyMENBJTIwNS5jcnQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLm1zb2NzcC5jb20wPgYJKwYBBAGCNxUHBDEwLwYnKwYBBAGCNxUIh9qGdYPu2QGCyYUbgbWeYYX062CBXYTS30KC55N6AgFkAgEQMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBNBgNVHSAERjBEMEIGCSsGAQQBgjcqATA1MDMGCCsGAQUFBwIBFidodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpL21zY29ycC9jcHMwJwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDAjAKBggrBgEFBQcDATCCBW0GA1UdEQSCBWQwggVgggx3d3cuYmluZy5jb22CEGRpY3QuYmluZy5jb20uY26CEyoucGxhdGZvcm0uYmluZy5jb22CCiouYmluZy5jb22CCGJpbmcuY29tghZpZW9ubGluZS5taWNyb3NvZnQuY29tghMqLndpbmRvd3NzZWFyY2guY29tghljbi5pZW9ubGluZS5taWNyb3NvZnQuY29tghEqLm9yaWdpbi5iaW5nLmNvbYINKi5tbS5iaW5nLm5ldIIOKi5hcGkuYmluZy5jb22CGGVjbi5kZXYudmlydHVhbGVhcnRoLm5ldIINKi5jbi5iaW5nLm5ldIINKi5jbi5iaW5nLmNvbYIQc3NsLWFwaS5iaW5nLmNvbYIQc3NsLWFwaS5iaW5nLm5ldIIOKi5hcGkuYmluZy5uZXSCDiouYmluZ2FwaXMuY29tgg9iaW5nc2F
uZGJveC5jb22CFmZlZWRiYWNrLm1pY3Jvc29mdC5jb22CG2luc2VydG1lZGlhLmJpbmcub2ZmaWNlLm5ldIIOci5iYXQuYmluZy5jb22CECouci5iYXQuYmluZy5jb22CEiouZGljdC5iaW5nLmNvbS5jboIPKi5kaWN0LmJpbmcuY29tgg4qLnNzbC5iaW5nLmNvbYIQKi5hcHBleC5iaW5nLmNvbYIWKi5wbGF0Zm9ybS5jbi5iaW5nLmNvbYINd3AubS5iaW5nLmNvbYIMKi5tLmJpbmcuY29tgg9nbG9iYWwuYmluZy5jb22CEXdpbmRvd3NzZWFyY2guY29tgg5zZWFyY2gubXNuLmNvbYIRKi5iaW5nc2FuZGJveC5jb22CGSouYXBpLnRpbGVzLmRpdHUubGl2ZS5jb22CDyouZGl0dS5saXZlLmNvbYIYKi50MC50aWxlcy5kaXR1LmxpdmUuY29tghgqLnQxLnRpbGVzLmRpdHUubGl2ZS5jb22CGCoudDIudGlsZXMuZGl0dS5saXZlLmNvbYIYKi50My50aWxlcy5kaXR1LmxpdmUuY29tghUqLnRpbGVzLmRpdHUubGl2ZS5jb22CCzNkLmxpdmUuY29tghNhcGkuc2VhcmNoLmxpdmUuY29tghRiZXRhLnNlYXJjaC5saXZlLmNvbYIVY253ZWIuc2VhcmNoLmxpdmUuY29tggxkZXYubGl2ZS5jb22CDWRpdHUubGl2ZS5jb22CEWZhcmVjYXN0LmxpdmUuY29tgg5pbWFnZS5saXZlLmNvbYIPaW1hZ2VzLmxpdmUuY29tghFsb2NhbC5saXZlLmNvbS5hdYIUbG9jYWxzZWFyY2gubGl2ZS5jb22CFGxzNGQuc2VhcmNoLmxpdmUuY29tgg1tYWlsLmxpdmUuY29tghFtYXBpbmRpYS5saXZlLmNvbYIObG9jYWwubGl2ZS5jb22CDW1hcHMubGl2ZS5jb22CEG1hcHMubGl2ZS5jb20uYXWCD21pbmRpYS5saXZlLmNvbYINbmV3cy5saXZlLmNvbYIcb3JpZ2luLmNud2ViLnNlYXJjaC5saXZlLmNvbYIWcHJldmlldy5sb2NhbC5saXZlLmNvbYIPc2VhcmNoLmxpdmUuY29tghJ0ZXN0Lm1hcHMubGl2ZS5jb22CDnZpZGVvLmxpdmUuY29tgg92aWRlb3MubGl2ZS5jb22CFXZpcnR1YWxlYXJ0aC5saXZlLmNvbYIMd2FwLmxpdmUuY29tghJ3ZWJtYXN0ZXIubGl2ZS5jb22CE3dlYm1hc3RlcnMubGl2ZS5jb22CFXd3dy5sb2NhbC5saXZlLmNvbS5hdYIUd3d3Lm1hcHMubGl2ZS5jb20uYXUwDQYJKoZIhvcNAQELBQADggIBADTpW/UWeupk40OP6k4yxihKStswxwqPAfMRmx4XyqmTAawAKRNM+6EZth1BQdPdOplwRTvs69kkmUHJH+ZjYXBezEACWkzEiNUQnzkRWajdSQIz08Ubj/mBD6U8xLYD+NXgiB0xNWabd8aiPsqPaj6I3qkNw4JvtgtHZQG1zlwC5/Lu6yV3DM3sKpQMyBmOnX6nVUiS0MTOzLgZOQzRk07nO7EXWGcKTmDBjE8cqv5IA/jQ6gtaxCI5pDxfXK4ct7oQyoChfxOXcEDKMmMndFmg9ch5c4an/FRM2cgzDfjR01A71LNUpLUdOjNV0T+ZEStqEpdyDFfjrHGDtzLyqEz3iyvvQFyjmlGh6OtZXwjCPpnVSrKCmfJKio0kUxyq+6t5tZAQbPVgFKiMrVnU+sgvmNVip1toijyz8vMVCkwJ2G++7xjJukoELMxZ50W4/SAMZLy1Asx02NBwYCu9+CTQPVnmPe7rmxhlQRBOfDNa1+5jwRHY64YudEzKhWR1uqS3ABd/fk+TL86yuNYGAgxnOm1FtOGieRgViV3+NzC+bDbuUOtmbD/GvDGmRwJRcCT
HL7jBmkHePh2ABY93NE/IbkaDP6l1Kw98AfqkzSUxhqHXuThe7KIoX9/0zv4AA1WZFis1QvAG7dpl9eio6vCdC/73HvBAlqRL+7Mb1uu0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert2.txt b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert2.txt new file mode 100644 index 000000000..a539dbcaf --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert2.txt @@ -0,0 +1 @@ +MIIKsQIBAzCCCm0GCSqGSIb3DQEHAaCCCl4EggpaMIIKVjCCBg8GCSqGSIb3DQEHAaCCBgAEggX8MIIF+DCCBfQGCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj37r+wRsc6/gICB9AEggTY1V5HNscO+2bZb7JSMme1ljERe7DCiRE2cJsPKlwW2/NEiSqAUX7gXKK0ISr6Dto71rFdza0uPwbcQoPN1QwkJHSAkxWg4OcAx2kf7077tlHhf5rzxTn5V3wXM0Q3h6NsDjSqSHjYVISIiXItUzlDaBpMY/NUFNCTyJR5I91MWsljrJ/bQaAIo57HJR9nzjY5DaBA9P3bAhmX5LJRGsJWoCEaGeeVQ3Yn6yD06ordiJnf6dNxqQGN+o2x54gqfmw+RnoC2f8VAsTIfb3fwJPKdg2JiJIa6Ms2Sc8VR7VGmZt34qZwTPBrzeqJjtIMT41bBae46lmma8ypYwErqzOYSrHqXPXzaxlloYy81HYWAsJTWyBxTsVBcLom5m9ru79+SKG35xY1wSkzZmWMNFfVRFCJy/X+h2ErrGYjogCHYaIUmiosvUccwRUXGU083ul9iTcz/Dl79VBz63OFX/CnZMDTQ8ugbqpvW78pAnBU0r8MUubHciD1sJG2zmMlxCAzan6BLm9OMyhTNIbzYOjQQw99MQQys/ZeyNLqTFHTeGRfU2ewqgHjbH2PYCQfjipXSmdmsSsGxlLA9AOtwAk3QKJ77P03HRGOeXmy/I4iIHuIQuaQcjfprNR2fI36dftDo7U4gvRQHkiti+zNVqpi3/hIc2k7O8bCcMeSvfIlUvWIPUrUceZmpVPpLdcFcQbN9+1nZwiFYydOhrPnlp40rSO3RM08EmQUfRYt8fwRFcoWBX3b411vOqZVGeMfMtThMYI53R4Cmh5tUp93FslHNmIfnuewhHfIm+vtCicLcW6TaC2l4EqmNf0flK5m5nANotCfqj87MPsB83qPwol/91BTKaxuH2hKrZDgU1ibPE8NhzBinp2ANi0BHK3Sl0CsC2MPyZpFY+4MWvk/SI9ex4VsKYKmhubOFkhDLLBZH0UEmUdNTH4Gd76GsDnfI9arR2ctM9ecTPeu74hKiHlNZhc4U3TX20FBeqF5tZYnfCLRhvdiNM9AlwEKqqQEe0W7PrALcNVdjhJl0X9+0Br28E3RKZQRITWa10Vjmh0WcYrzEQ3/qEZYbqVpHMp+kdrHxB65v0zlGxjdwyKzafLzqYXmaHOyVlFnkayNaAkVVxOCzNrxB9HfhjvhjWafeMvA0p7O9CxTD2xPEhUaHQ5j7L8F0alfMYcg73SdGHAcY6AV8+eh0jqs3IF68cquXOl5Bm8uYKRjtgl9nY6hYc0lRDdtFHZo8ayNDr0cltNU7XZTaCKVNSDTRn92rTNJY0E3PD5HSKcRi58WJrIgEDGasyleRkRlGTY7512Qut0rg7m1Eyp6MK+sN
mSSA7cR70pH7I1dwy4VrJMODdMH11y1QJF2EQWQdN00Js54tjVgTIO3btb5N7jhNYpRedv0a4UZ8TdDI4ZMCMf3SdP3xbQ06M1pFrS8WQzwp3KTk8vmnseJL84n0hC8KqWmGmTWHTa9dwmopeM6Xh/Jm1pkrgrloxqfSlscGEJE0plAnk1mLx29FxswfZ6a7pNKg7CydK4SiDkqM+pWukPbgKODqYPUvS0nk3RGGXvZSIzTbvm77tF+MqXOb6Rn+IflIk4yZsjIBQA0I/bQ78YDzXUVsrtAi9waRoCZs+L48NUy4zpKn25FMqkrziVn+TGB4jANBgkrBgEEAYI3EQIxADATBgkqhkiG9w0BCRUxBgQEAQAAADBdBgkqhkiG9w0BCRQxUB5OAHQAZQAtAGMANwBmADEAYwBhADYAMQAtADQAOAA1ADQALQA0ADgAZQBmAC0AYQAwADgANQAtAGQANABhADIAZgA1AGYAOAAyADcAZAAxMF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABvAGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIwggQ/BgkqhkiG9w0BBwagggQwMIIELAIBADCCBCUGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEDMA4ECFcAfrkm3ibUAgIH0ICCA/hlBog8GY9GCpucTwAxBGa0cOGQ29EK0xfrmY/Dv59IeJhRr47Mvl1XNk5PIInb64RsOVr00jrJAbfgLLKMBowcQLT7k6jGbGBdOzC57x9DNP0VuHsIIym0Z+SpJgEWBbtdBTGzgNw/YoXYxT4Rtka9ScSyCFjmmCzXz7bGLqC7yrGb7BzigQ9y4u4bg0pf75pERzN8rJM29Ob2IydkgARfpmbNKjdMCtD6dI7tafG2lQfUX6sgQY+Sy5HTz3ansN8X1yv2WQTu8Drxf2ce55v4WrFbPTTND94ubgDt7jvbCe1DuNP1DAYmQ5pbW0GGqF1x2csK5WWD7J8FD08VaQFM8y8pGIUeUkN4rYU3eTdTAQe+ec2hOr9QZn1Sb/p5u4KqIMn4MSCQ8EU0gXa2JETdUjXPr/5JFZTidJYagRyMIkYnwg9uusikctulaBsHMBKMYQ0Z19CEbcd2phdoxWTrtp7kwwjnu64zPgE6ALe9yJOT8AFEB6H1c16Z+aPGj9hbhkh6tcdGWUvzDYq08wjKjP3nA78StIisUmeZPfAXJUquPzRZr0pmcwYWfyP54TdC2BvPlLW/QXVV44IGxUdLuI6mz4p+O2+xKu9QMFwdcpij2ZK4uMrBLDo7ZoTQ4rBRnn471AMUKgeP0D5tbl8PygUU1RqHv34ok3fwx0WglzdMQJyt53PiPWW4lipwUtUfd0eD8CXoMccf8XJmugVUBCD1wQsyCW6RrR9RX8HXVBrm5O2HKfJcQYznl3qHqXb6ofvbOQ3S+v0ALN+sma8Tn6JceVTAOH+UuMdcu0FIDYnpmrvMecnJ2kbs1Y35mj4rSJyP5PGLg+ygb9VlBPwCCem/jHL+YivN38+0oWqfn2slyI4FNKX+5U8M6xpiEaq6McKwKZC1d51A4dUdMAkO2d1Z6rVjqhKeqE6HWD9A0cyPBFZpNQskUfNDW8qILLEfEjhBi+s1LkHzKDykCN/ReFfRiQS84DekoC59cymM8Hs1geMWCMFWfut4HTd7ItYaiJz5qpYVY4U/8myhyWnrktjLjQ6OkdM9bBDIpRHj95MYEC26NlWQZwjk+yynPTZf0w5p2Ok6Dq2shJFykuJ1VkelgvsPe8qMa55Wp11dpREIFzXouPXNP/vFpziZcl1OKTd7Dwa+ruQFRfsoZGzupsBOYxrmYqr6kOm5mzMW0HAlsWwl5mY2aSQMvXBE6k32xTkPIlIp763Ee4m6cmx4+SDcO5D+a9t05QY4JmssL+x3T9qsbXSXDPEsg
0cfVvuQYy9AYkIFOes4G45IagRAvhQQj9bEh8kTvp8CFDDtIrbWjX50zreb51VcAcEkIOLyROtIdLem0zA7MB8wBwYFKw4DAhoEFC0i4I5iwNYQug0vTVS0JC/Qm+/NBBTsUM0D9QxIZYUi+qlDy14sOcEaUwICB9A= diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert3b64.txt b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert3b64.txt new file mode 100644 index 000000000..204aaeb1a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/files/cert3b64.txt @@ -0,0 +1 @@ +LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZhekNDQTFPZ0F3SUJBZ0lVTCtURUFCTXdnNlRwaTZLN3Y1SGl6ZlJJaXY0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1JURUxNQWtHQTFVRUJoTUNRVlV4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeElUQWZCZ05WQkFvTQpHRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MFpEQWVGdzB5TWpBNU1qY3dPVFU1TkRCYUZ3MHlNekE1Ck1qY3dPVFU1TkRCYU1FVXhDekFKQmdOVkJBWVRBa0ZWTVJNd0VRWURWUVFJREFwVGIyMWxMVk4wWVhSbE1TRXcKSHdZRFZRUUtEQmhKYm5SbGNtNWxkQ0JYYVdSbmFYUnpJRkIwZVNCTWRHUXdnZ0lpTUEwR0NTcUdTSWIzRFFFQgpBUVVBQTRJQ0R3QXdnZ0lLQW9JQ0FRRE04dHM4YjVkYmhjVkpiMWFjMThaYS92RTh1Ly9oOVZSV3RuSmRxQjhSCkpmRDk3QjFTZ21GbnR2aE92MWJVTEF4VmZkMTRWcGRneUo0QUkrUHRkV2NFbWVvbUxYMkFwZmJSb01LMUhPZFAKYWU2UmRySzYxbkh6QUdhQ2tDVDh1YjVXek0zRnFjWHhJSW84R1BqaXFIaGd5Yy9JVUJBdE42ZGZSVXovSDdpYwpGb25UUTdvWndYRFNSKzUxNHpTeTNzcU9MWTIxbFl5YStqSXNxcldySG9KYnpuUy83Q3BhK25iTENHdEFvTTJwCk5EMVUwVUY0OFJ4NStFdlRLY0FDUGgwQ2hVejZLZVN2SWpnVm81WUo4c0xjK2w1LzdhNkRHVzhCQ3h0VHVSZ3oKL2lEK0pkeTJ4ZmpTekVmQk9tWlVSTkwwalNwczcwZU1ScWsrOUUwSTUwU2RFVFg4Um5HdkF0QlB4TFdyajRXdwpXa2Q2cTQ4TXU3aGM3TmZtZzJ0NzRQMFUvQ2hiSHNIQ1dmSlZrTkVIWnQ1WjZ4NHNkSzJmZFN2clBITVRNaE5pCkxXZjdMaDVLaG5BY0xaRnp0UjBzZis5ZWp0bTV2dTBZWHBJYUxuMEExdThITXJRV3lHYTByOExhQ1V6bmpxSlkKNXVIQUNJbkZwMzlkMXBpeUdRRkk1SzA1OGdxOU51UGhLdytaVENsZjBlMmFyQUVIT2VIdzN2V3ZjYmg1cHE4NQp6T3YzUnBIUUhpbm1WRUdNNHV0cld6Ry9kNGNWSlU3UW5RbzcyU0V3M0oxTWRrU1JpTWVpYTlkUGxmWVkvRzJLCnVweFR4aWlzeXVQek1BckVUZEdBUVpyMkd2YlRNakI1eXJwcjFCTEVJVWwzd1Y3Mmw5YnY1SXp0YnJ2Ly9nUWQKK1
FJREFRQUJvMU13VVRBZEJnTlZIUTRFRmdRVUJoK0FUT0ovMTZlMDhxYkltTW1KRitsajBFRXdId1lEVlIwagpCQmd3Rm9BVUJoK0FUT0ovMTZlMDhxYkltTW1KRitsajBFRXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QU5CZ2txCmhraUc5dzBCQVFzRkFBT0NBZ0VBeUswV0V5Vy9yYTFiYnVKazF1WG85RUdVUlp3OWYzcklsR1gzd3lKMTdCYzYKd3BDeUZnZ3h5T1ZsWVg3empyN3EwY2dNNGFKL1ZWUHEzUVk3WE8yNDZNYm1BcUpFMFRpeVUyazBtcGVQRXkyVApncGJUdHhCWkZaak8zRko5Y1dhVmlCR2xBaURxMEV5cWlmWEljZk5zd29VdXZVeTBjNWNTZVAwekMvaUxzZEU1ClBPZEtNbXc3NGZWdzg3MUxDcFIrcGhnZFBwSTFCaHdnem1nZms3QXZGYmxlYU1ITUt2S3NZZWh0NktPSHNtRkUKbzZaaTgvcTI1SFNEKzE0RkNyZ0x3Zm9NazdDUUlpbVJJVVJRNUh3Q2s0cGZ5SlJXcXRiUVJEYmhMeUFpQ3g4Kwpzb0FEaXRNSVFPdTlEN1haTnhXQ1BYZ25pN2VGaUR3UmtseE9DVmwxTC9hYzU2bTlmS1NjbyttN2VZZTJpY0E1CnBtVnV3ZDlKSDhRRGlrNG05KzY3dVZWSzVqVGR2WGZqVFZtNlhVOG1MWUo2WGU4UFlwUmNVZWZJS0xMRmw1d0QKTGRrZlZUa244RFMrZ1hBNGtNSDFGM0xHVXlrNTFQSTlWZWdPVGRhMzdPWUxYeURuS1FiQmxncHFxUCtTRGdDbQpDN3dqakZIeFFvUGZxVjVpOTFjdkNWc1hNdFFwdUJzZmM4blVnZ205QjMxMUE0RTc4dEZwRkNtUmxJMHUxQ0Q1CnFMc2c4ZG5lc1ltWis2WjNvcG5RL0x0SWNEZGVnUDhBWitHSkUxWllrZ2M5R3BkSjZHRXc4bWJUdG9rNmRtTHkKZWQ3NkcvV0d5OVprMjFTQU9HYjYzRG1pc3lweVN5bjhhcGpDWXEvRDBQTXNwRnJjenFKS1AzenVqYWI4bjdnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/tasks/main.yml new file mode 100644 index 000000000..1367300c7 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appgateway/tasks/main.yml @@ -0,0 +1,2628 @@ +- 
name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + cert1_file: "cert1.txt" + cert2_file: "cert2.txt" + cert3b64_file: "cert3b64.txt" + run_once: yes +- name: Set json query to only retrieve gateways from this test + set_fact: + query: "[?ends_with(name, `{{ rpfx }}`)]" + run_once: yes + +- name: Load app gateways + azure_rm_appgateway_info: + register: appgw_output +- name: Assert there are no gateways + assert: + that: + - appgw_output.gateways | community.general.json_query(query) | length == 0 + +- name: Create a virtual network + azure_rm_virtualnetwork: + name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + dns_servers: + - 127.0.0.1 + - 127.0.0.2 + register: vnet_output +- name: Create a subnet + azure_rm_subnet: + name: subnet{{ rpfx }} + virtual_network_name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + address_prefix_cidr: 10.1.0.0/24 + register: subnet_output + +- name: Create instance of Application Gateway -- check mode + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe 
+ protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output + check_mode: yes +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - 
port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Load app gateway + azure_rm_appgateway_info: + name: "appgateway{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert properties + assert: + that: + - output.gateways | length == 1 + - output.gateways[0].name == 'appgateway{{ rpfx }}' + - output.gateways[0].ssl_policy + - output.gateways[0].ssl_policy.policy_type == 'predefined' + - output.gateways[0].ssl_policy.policy_name == 'ssl_policy20150501' + +- name: Stop instance of Application Gateway -- check mode + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + 
gateway_state: stopped + check_mode: true + register: output +- name: Assert the resource instance is stopped + assert: + that: + - output.changed + +- name: Stop instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + gateway_state: stopped + register: output +- name: Assert the resource instance is stopped + assert: + that: + - output.changed + - output.operational_state == 'Stopped' + +- name: Stop already stopped instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + gateway_state: stopped + register: output +- name: Assert the resource instance did not change + assert: + that: + - not output.changed + - output.operational_state == 'Stopped' + +- name: Start instance of Application Gateway -- check mode + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + gateway_state: started + check_mode: true + register: output +- name: Assert the resource instance is started + assert: + that: + - output.changed + +- name: Start instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + gateway_state: started + register: output +- name: Assert the resource instance is started + assert: + that: + - output.changed + - output.operational_state == 'Running' + +- name: Start already started instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + gateway_state: started + register: output +- name: Assert the resource instance did not change + assert: + that: + - not output.changed + - output.operational_state == 'Running' + +- name: Try to update instance of Application Gateway - no change + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + 
ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Try to update instance of Application 
Gateway - single change + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 81 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: 
redirect_site_to_https + register: output +- name: Assert the resource instance is updated + assert: + that: + - output.changed + +- name: Update SSL config for Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: custom + cipher_suites: + - tls_ecdhe_rsa_with_aes_128_gcm_sha256 + - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256 + - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 + min_protocol_version: tls_v1_2 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 81 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener 
+ name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is updated + assert: + that: + - output.changed + +- name: Load app gateway + azure_rm_appgateway_info: + name: "appgateway{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert SSL config updated + assert: + that: + - output.gateways[0].ssl_policy.policy_type == 'custom' + +- name: Try to update SSL config for Application Gateway - no change + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: custom + cipher_suites: + - tls_ecdhe_rsa_with_aes_128_gcm_sha256 + - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256 + - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384 + min_protocol_version: tls_v1_2 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 81 + protocol: http + cookie_based_affinity: enabled + probe: 
custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Create instance of Application Gateway by looking up virtual network and subnet + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-subnet-lookup{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + name: subnet{{ rpfx }} + virtual_network_name: vnet{{ rpfx }} + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + name: subnet{{ rpfx }} + virtual_network_name: vnet{{ rpfx }} + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: 
test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Load app gateways + azure_rm_appgateway_info: + register: appgw_output +- name: Assert there are the correct number of gateways + assert: + that: + - appgw_output.gateways | community.general.json_query(query) | length == 2 + +- name: Load app gateway by name + azure_rm_appgateway_info: + name: "appgateway-subnet-lookup{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: appgw_output +- name: Assert there are the correct number of gateways + assert: + that: + - appgw_output.gateways | community.general.json_query(query) | length == 1 + - (appgw_output.gateways | community.general.json_query(query))[0].name == 'appgateway-subnet-lookup{{ rpfx }}' + - (appgw_output.gateways | 
community.general.json_query(query))[0].provisioning_state == 'Succeeded' + - (appgw_output.gateways | community.general.json_query(query))[0].operational_state == 'Running' + +- name: Create instance of Application Gateway with probe using hostname from backend http settings + azure_rm_appgateway: + resource_group: "{{ resource_group_secondary }}" + name: "appgateway-probe-{{ rpfx }}" + location: "{{ vnet_output.state.location }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + pick_host_name_from_backend_http_settings: true + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + pick_host_name_from_backend_address: true + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool 
+ backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Test no update instance of Application Gateway with probe using hostname from backend http settings + azure_rm_appgateway: + resource_group: "{{ resource_group_secondary }}" + name: "appgateway-probe-{{ rpfx }}" + location: "{{ vnet_output.state.location }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + pick_host_name_from_backend_http_settings: true + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + pick_host_name_from_backend_address: true + http_listeners: + - frontend_ip_configuration: 
sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: sample_http_listener + name: rule1 + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Load app gateway by resource group + azure_rm_appgateway_info: + resource_group: "{{ resource_group_secondary }}" + register: appgw_output +- name: Assert there are the correct number of gateways and they are the right ones + assert: + that: + - appgw_output.gateways | community.general.json_query(query) | length == 1 + - (appgw_output.gateways | community.general.json_query(query))[0].name == 'appgateway-probe-{{ rpfx }}' + +- name: Load app gateway not found in resource group + azure_rm_appgateway_info: + name: "appgateway{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + register: appgw_output +- name: Assert there are no results + assert: + that: + - appgw_output.gateways | community.general.json_query(query) | length == 0 + +- name: Create instance of Application Gateway with path based rules + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-path-rules{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + 
authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: path_based_routing + http_listener: sample_http_listener + name: rule1 + url_path_map: path_mappings + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + name: http_redirect_rule + url_path_maps: + - name: path_mappings + default_backend_address_pool: test_backend_address_pool + default_backend_http_settings: sample_appgateway_http_settings + path_rules: + - name: path_rules + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + paths: + - "/abc" + - "/123/*" + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + 
include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Try to update instance of Application Gateway with path based rules - no change + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-path-rules{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: predefined + policy_name: ssl_policy20150501 + authentication_certificates: + - name: cert1 + data: "{{ lookup('file', cert1_file) }}" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 90 + name: ag_frontend_port + - port: 80 + name: http_frontend_port + backend_address_pools: + - backend_addresses: + - ip_address: 10.0.0.4 + name: test_backend_address_pool + probes: + - name: custom_probe + protocol: http + host: 10.0.0.4 + path: /healthz + interval: 30 + timeout: 30 + unhealthy_threshold: 3 + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + probe: custom_probe + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: ag_frontend_port + protocol: https + ssl_certificate: cert2 + name: sample_http_listener + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: path_based_routing + http_listener: sample_http_listener + name: rule1 + url_path_map: path_mappings + - rule_type: Basic + http_listener: http_listener + redirect_configuration: redirect_site_to_https + 
name: http_redirect_rule + url_path_maps: + - name: path_mappings + default_backend_address_pool: test_backend_address_pool + default_backend_http_settings: sample_appgateway_http_settings + path_rules: + - name: path_rules + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + paths: + - "/abc" + - "/123/*" + redirect_configurations: + - redirect_type: permanent + target_listener: sample_http_listener + include_path: true + include_query_string: true + name: redirect_site_to_https + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Create instance of Application Gateway with complex routing and redirect rules + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-complex{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: 
"inbound-http" + protocol: "http" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + 
include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Try to update instance of Application Gateway with complex routing and redirect rules - no change + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-complex{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + 
frontend_ip_configuration: "sample_gateway_frontend_ip_config" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Configure public IP for v2 gateway + azure_rm_publicipaddress: + name: "appgateway-v2-{{ rpfx }}-pip" + resource_group: "{{ resource_group }}" + sku: "standard" + allocation_method: "static" + +- name: Try to create v2 instance of 
Application Gateway with rewrite rules + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-v2-{{ rpfx }}" + sku: + name: standard_v2 + tier: standard_v2 + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + 
path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + 
target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Try to create v2 instance of Application Gateway with rewrite rules - no update + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-v2-{{ rpfx }}" + sku: + name: standard_v2 + tier: standard_v2 + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" 
+ frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + 
redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output +- name: Assert the resource instance is not updated + assert: + that: + - not output.changed + +- name: Try to create v2 instance of Application Gateway with rewrite rules - update rewrite rule + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-v2-{{ rpfx }}" + sku: + name: standard_v2 + tier: standard_v2 + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: 
"https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "BarUpdated" + response_header_configurations: + - 
header_name: "FooResponse" + header_value: "BarResponse" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output +- name: Assert the resource instance is updated + assert: + that: + - output.changed + +- name: Try to create v2 instance of Application Gateway with autoscale configuration and trusted root certificates + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-v2-{{ rpfx }}" + sku: + name: standard_v2 + tier: standard_v2 + autoscale_configuration: + max_capacity: 2 + min_capacity: 1 + enable_http2: true + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + trusted_root_certificates: + - name: "rootCert3" + data: "{{ lookup('file', cert3b64_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + 
backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + connection_draining: + drain_timeout_in_sec: 60 + enabled: true + trusted_root_certificates: + - "rootCert3" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: 
"add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.changed + + +- name: Try to create v2 instance of Application Gateway with autoscale configuration and trusted root certificates - no update + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-v2-{{ rpfx }}" + sku: + name: standard_v2 + tier: standard_v2 + autoscale_configuration: + max_capacity: 2 + min_capacity: 1 + enable_http2: true + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: 
"{{ lookup('file', cert2_file) }}" + trusted_root_certificates: + - name: "rootCert3" + data: "{{ lookup('file', cert3b64_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + connection_draining: + drain_timeout_in_sec: 60 + enabled: true + trusted_root_certificates: + - "rootCert3" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - 
"/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + register: output + +- name: 
Assert the resource instance is not updated + assert: + that: + - not output.changed + +- name: Configure public IP for waf_v2 gateway + azure_rm_publicipaddress: + name: "appgateway-waf-v2-{{ rpfx }}-pip" + resource_group: "{{ resource_group }}" + sku: "standard" + allocation_method: "static" + +- name: Try to create waf_v2 instance of Application Gateway with waf configuration + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-waf-v2-{{ rpfx }}" + sku: + name: waf_v2 + tier: waf_v2 + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-waf-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: 
"https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: "set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + 
pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + web_application_firewall_configuration: + enabled: true + firewall_mode: "Detection" + rule_set_type: "OWASP" + rule_set_version: "3.0" + request_body_check: true + max_request_body_size_in_kb: 128 + file_upload_limit_in_mb: 100 + register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Try to create waf_v2 instance of Application Gateway with waf configuration - no update + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-waf-v2-{{ rpfx }}" + sku: + name: waf_v2 + tier: waf_v2 + capacity: 2 + ssl_policy: + policy_type: "predefined" + policy_name: "ssl_policy20170401_s" + ssl_certificates: + - name: cert2 + password: your-password + data: "{{ lookup('file', cert2_file) }}" + gateway_ip_configurations: + - subnet: + id: "{{ subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - name: "public-inbound-ip" + public_ip_address: "appgateway-waf-v2-{{ rpfx }}-pip" + frontend_ports: + - name: "inbound-http" + port: 80 + - name: "inbound-https" + port: 443 + backend_address_pools: + - name: test_backend_address_pool1 + backend_addresses: + - ip_address: 10.0.0.1 + - name: test_backend_address_pool2 + backend_addresses: + - ip_address: 10.0.0.2 + backend_http_settings_collection: + - name: "http-profile1" + port: 443 + protocol: https + pick_host_name_from_backend_address: true + probe: "http-probe1" + cookie_based_affinity: "Disabled" + - name: "http-profile2" + 
port: 8080 + protocol: http + pick_host_name_from_backend_address: true + probe: "http-probe2" + cookie_based_affinity: "Disabled" + http_listeners: + - name: "inbound-http" + protocol: "http" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-http" + - name: "inbound-traffic1" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic1.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + - name: "inbound-traffic2" + protocol: "https" + frontend_ip_configuration: "public-inbound-ip" + frontend_port: "inbound-https" + host_name: "traffic2.example.com" + require_server_name_indication: true + ssl_certificate: "cert2" + url_path_maps: + - name: "path_mappings" + default_redirect_configuration: "redirect-traffic1" + default_rewrite_rule_set: "configure-headers" + path_rules: + - name: "path_rules" + backend_address_pool: "test_backend_address_pool1" + backend_http_settings: "http-profile1" + paths: + - "/abc" + - "/123/*" + request_routing_rules: + - name: "app-routing1" + rule_type: "basic" + http_listener: "inbound-traffic1" + backend_address_pool: "test_backend_address_pool2" + backend_http_settings: "http-profile1" + rewrite_rule_set: "configure-headers" + - name: "app-routing2" + rule_type: "path_based_routing" + http_listener: "inbound-traffic2" + url_path_map: "path_mappings" + - name: "redirect-routing" + rule_type: "basic" + http_listener: "inbound-http" + redirect_configuration: "redirect-http" + rewrite_rule_sets: + - name: "configure-headers" + rewrite_rules: + - name: "add-security-response-header" + rule_sequence: 1 + action_set: + response_header_configurations: + - header_name: "Strict-Transport-Security" + header_value: "max-age=31536000" + - name: "remove-backend-response-headers" + rule_sequence: 2 + action_set: + response_header_configurations: + - header_name: "Server" + - header_name: "X-Powered-By" + - name: 
"set-custom-header-condition" + rule_sequence: 3 + conditions: + - variable: "var_client_ip" + pattern: "1.1.1.1" + - variable: "http_req_Authorization" + pattern: "12345" + ignore_case: false + action_set: + request_header_configurations: + - header_name: "Foo" + header_value: "Bar" + probes: + - name: "http-probe1" + interval: 30 + path: "/abc" + protocol: "https" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + - name: "http-probe2" + interval: 30 + path: "/xyz" + protocol: "http" + pick_host_name_from_backend_http_settings: true + timeout: 30 + unhealthy_threshold: 2 + redirect_configurations: + - name: "redirect-http" + redirect_type: "permanent" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + request_routing_rules: + - "redirect-routing" + - name: "redirect-traffic1" + redirect_type: "found" + target_listener: "inbound-traffic1" + include_path: true + include_query_string: true + url_path_maps: + - "path_mappings" + web_application_firewall_configuration: + enabled: true + firewall_mode: "Detection" + rule_set_type: "OWASP" + rule_set_version: "3.0" + request_body_check: true + max_request_body_size_in_kb: 128 + file_upload_limit_in_mb: 100 + register: output + +- name: Assert the resource instance is not updated + assert: + that: + - not output.changed + +- name: Delete instance of Application Gateway -- check mode + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Application Gateway + azure_rm_appgateway: + 
resource_group: "{{ resource_group }}" + name: "appgateway{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete path-based rules instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-path-rules{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete custom probe instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group_secondary }}" + name: "appgateway-probe-{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete subnet-lookup instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-subnet-lookup{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete v2 instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-v2-{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete waf_v2 instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-waf-v2-{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete public IP for v2 gateway + azure_rm_publicipaddress: + name: "appgateway-v2-{{ rpfx }}-pip" + resource_group: "{{ resource_group }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete public IP for waf v2 gateway + azure_rm_publicipaddress: + name: "appgateway-waf-v2-{{ rpfx }}-pip" + resource_group: "{{ resource_group }}" + 
state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete complex instance of Application Gateway + azure_rm_appgateway: + resource_group: "{{ resource_group }}" + name: "appgateway-complex{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete virtual network + azure_rm_virtualnetwork: + name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/aliases new file mode 100644 index 000000000..a6b233ed0 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group4 +unstable +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/tasks/main.yml new file mode 100644 index 000000000..3e0906bfc --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_appserviceplan/tasks/main.yml @@ -0,0 +1,116 @@ +- name: Prepare facts + set_fact: + resource_prefix: "{{ resource_group_secondary | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: 
Prepare facts + set_fact: + linux_plan_resource_group: "{{ resource_group_secondary }}" + win_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}winplan" + linux_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}linplan" + run_once: yes + +- name: create a windows plan + azure_rm_appserviceplan: + name: "{{ win_plan_name }}1" + resource_group: "{{ resource_group }}" + sku: B1 + register: output + +- name: assert app service was created + assert: + that: + - output.changed + - output.id + +- name: create a linux plan + azure_rm_appserviceplan: + resource_group: "{{ linux_plan_resource_group }}" + name: "{{ linux_plan_name }}1" + sku: S1 + is_linux: true + number_of_workers: 1 + register: output + +- name: assert app service was created + assert: + that: + - output.changed + - output.id + +- name: get app service plan by name + azure_rm_appserviceplan_info: + resource_group: "{{ linux_plan_resource_group }}" + name: "{{ linux_plan_name }}1" + register: output + +- name: assert is_linux is True + assert: + that: + - output.appserviceplans | length == 1 + - output.appserviceplans[0].is_linux == True + +- name: create linux app service plan idempotent + azure_rm_appserviceplan: + resource_group: "{{ linux_plan_resource_group }}" + name: "{{ linux_plan_name }}1" + sku: S1 + is_linux: true + number_of_workers: 1 + register: output + +- name: assert app service is not updated + assert: + that: not output.changed + +- name: update a windows plan sku + azure_rm_appserviceplan: + name: "{{ win_plan_name }}1" + resource_group: "{{ resource_group }}" + sku: B2 + register: output + +- name: assert app service was updated + assert: + that: + - output.changed + +- name: update a linux plan number of workers + azure_rm_appserviceplan: + resource_group: "{{ linux_plan_resource_group }}" + name: "{{ linux_plan_name }}1" + sku: S1 + is_linux: true + number_of_workers: 2 + register: output + +- name: assert app service was updated + assert: + that: + - output.changed + 
+- name: create premium linux plan + azure_rm_appserviceplan: + resource_group: "{{ linux_plan_resource_group }}" + name: "{{ linux_plan_name }}-premium" + sku: P1v2 + is_linux: true + register: output + +- name: assert app service was created + assert: + that: + - output.changed + - output.id + +- name: create premium linux plan idempotent + azure_rm_appserviceplan: + resource_group: "{{ linux_plan_resource_group }}" + name: "{{ linux_plan_name }}-premium" + sku: P1v2 + is_linux: true + register: output + +- name: assert app service is not updated + assert: + that: not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/aliases new file mode 100644 index 000000000..bd71a19ed --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group11 +destructive +azure_rm_automationaccount_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/tasks/main.yml new file mode 100644 index 000000000..1e00eb9ac --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationaccount/tasks/main.yml @@ -0,0 +1,88 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ 
resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + name: "account{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create automation account + azure_rm_automationaccount: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Create automation account + azure_rm_automationaccount: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.changed + - output.id + +- name: Create automation account + azure_rm_automationaccount: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - not output.changed + +- name: Get automation account + azure_rm_automationaccount_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + list_statistics: yes + list_usages: yes + list_keys: yes + register: facts + +- assert: + that: + - facts.automation_accounts | length == 1 + - facts.automation_accounts[0].keys + - facts.automation_accounts[0].usages + - facts.automation_accounts[0].statistics + - facts.automation_accounts[0].state == "Ok" + +- name: Delete account + azure_rm_automationaccount: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Delete account + azure_rm_automationaccount: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- assert: + that: + - output.changed + +- name: Delete account + azure_rm_automationaccount: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/aliases new file mode 100644 index 000000000..0d8bc62ca --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group11 +destructive +azure_rm_automationrunbook +azure_rm_automationrunbook_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/tasks/main.yml new file mode 100644 index 000000000..d6f7ba8a1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_automationrunbook/tasks/main.yml @@ -0,0 +1,139 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(10, True, '') }}" + run_once: yes + +- name: Create automation account + azure_rm_automationaccount: + name: "account-{{ rpfx }}" + resource_group: "{{ resource_group }}" + +- name: create automation runbook (check mode) + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}" + runbook_type: "Script" + description: "Fred test" + check_mode: yes + +- name: create automation runbook with default parameters + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: 
"runbook-{{ rpfx }}" + runbook_type: "Script" + description: "Fred test" + register: output + +- name: Assert the automation runbook is well created + assert: + that: + - output.changed + +- name: create automation runbook with default parameters (idempotent) + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}" + runbook_type: "Script" + description: "Fred test" + register: output + +- name: Assert the automation runbook is well created + assert: + that: + - not output.changed + +- name: Create automation runbook with more paramters + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}-secondary" + runbook_type: "Script" + description: "test" + log_activity_trace: 2 + log_progress: False + log_verbose: True + tags: + key1: value1 + register: output + +- name: Assert the automation runbook is well created + assert: + that: + - output.changed + +- name: Update automation runbook with more paramters + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}-secondary" + runbook_type: "Script" + description: "update" + log_activity_trace: 3 + log_progress: True + log_verbose: False + tags: + key2: value2 + register: output + +- name: Assert the automation runbook is well created + assert: + that: + - output.changed + +- name: Publish automation runbook + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}-secondary" + publish: True + register: output + +- name: Assert the automation runbook is well published + assert: + that: + - output.changed + +- name: Get automation runbook + azure_rm_automationrunbook_info: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: 
"runbook-{{ rpfx }}-secondary" + register: output + +- name: Assert the automation runbook is well created + assert: + that: + - output.automation_runbook[0].description == 'update' + - output.automation_runbook[0].log_activity_trace == 3 + - output.automation_runbook[0].log_progress == true + - output.automation_runbook[0].log_verbose == false + - output.automation_runbook[0].tags | length == 2 + - output.automation_runbook[0].state == 'Published' + +- name: Get all automation runbook + azure_rm_automationrunbook_info: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + register: output + +- name: Assert the automation runbook is well created + assert: + that: + - output.automation_runbook | length == 2 + +- name: delete automation runbook + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}" + state: absent + +- name: delete automation runbook + azure_rm_automationrunbook: + resource_group: "{{ resource_group }}" + automation_account_name: "account-{{ rpfx }}" + name: "runbook-{{ rpfx }}-secondary" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/aliases new file mode 100644 index 000000000..ed266f358 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group14 +destructive +azure_rm_autoscale diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/meta/main.yml @@ -0,0 
+1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/tasks/main.yml new file mode 100644 index 000000000..8c8f144dc --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_autoscale/tasks/main.yml @@ -0,0 +1,221 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + name: "scale{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: testVnet + address_prefixes: "10.0.0.0/16" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: testSubnet + address_prefix: "10.0.1.0/24" + virtual_network: testVnet + +- name: Create VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_DS1_v2 + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: 2 + virtual_network_name: testVnet + subnet_name: testSubnet + upgrade_policy: Manual + tier: Standard + managed_disk_type: Standard_LRS + os_disk_caching: ReadWrite + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: 
ReadWrite + managed_disk_type: Standard_LRS + register: vmss + +- name: create auto scaling (check mode) + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + target: "{{ vmss.ansible_facts.azure_vmss.id }}" + enabled: true + profiles: + - count: '1' + recurrence_days: + - Monday + name: Auto created scale condition + recurrence_timezone: China Standard Time + recurrence_mins: + - '0' + min_count: '1' + max_count: '1' + recurrence_frequency: Week + recurrence_hours: + - '18' + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: create auto scaling + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + target: + name: "testVMSS{{ rpfx }}" + types: "virtualMachineScaleSets" + namespace: "Microsoft.Compute" + enabled: true + profiles: + - count: '1' + recurrence_days: + - Monday + name: Auto created scale condition + recurrence_timezone: China Standard Time + recurrence_mins: + - '0' + min_count: '1' + max_count: '1' + recurrence_frequency: Week + recurrence_hours: + - '18' + register: output + +- assert: + that: + - output.changed + - output.id + +- name: create auto scaling (idemponent) + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + target: "{{ vmss.ansible_facts.azure_vmss.id }}" + enabled: true + profiles: + - count: '1' + recurrence_days: + - Monday + name: Auto created scale condition + recurrence_timezone: China Standard Time + recurrence_mins: + - '0' + min_count: '1' + max_count: '1' + recurrence_frequency: Week + recurrence_hours: + - '18' + register: output + +- assert: + that: + - not output.changed + - output.id + +- name: update auto scaling + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + target: "{{ vmss.ansible_facts.azure_vmss.id }}" + enabled: true + profiles: + - count: '1' + recurrence_days: + - Monday + name: Auto created scale condition 0 + rules: + - time_aggregation: 
Average + time_window: 10 + direction: Increase + metric_name: Percentage CPU + metric_resource_uri: "{{ vmss.ansible_facts.azure_vmss.id }}" + value: '1' + threshold: 70 + cooldown: 5 + time_grain: 1 + statistic: Average + operator: GreaterThan + type: ChangeCount + max_count: '1' + recurrence_mins: + - '0' + min_count: '1' + recurrence_timezone: China Standard Time + recurrence_frequency: Week + recurrence_hours: + - '6' + register: output + +- assert: + that: + - output.changed + - output.profiles[0].rules[0].metric_resource_uri == vmss.ansible_facts.azure_vmss.id + +- name: delete auto scaling (check mode) + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + state: absent + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: delete auto scaling + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + state: absent + register: output + +- assert: + that: + - output.changed + +- name: delete auto scaling (idemponetent) + azure_rm_autoscale: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + state: absent + register: output + +- assert: + that: + - not output.changed + +- name: Clean VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + vm_size: Standard_DS1_v2 + name: testVMSS{{ rpfx }} + state: absent + +- name: Clean subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: testSubnet + virtual_network: testVnet + state: absent + +- name: Clean virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: testVnet + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/aliases new file mode 100644 index 000000000..17456633d --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group2 +destructive +azure_rm_availalibityset_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/tasks/main.yml new file mode 100644 index 000000000..7dc00bf26 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_availabilityset/tasks/main.yml @@ -0,0 +1,193 @@ +- name: Create an availability set with default options - Check Mode + azure_rm_availabilityset: + name: myavailabilityset1 + resource_group: "{{ resource_group }}" + tags: + tag1: testtag + register: results + check_mode: yes + +- assert: + that: results.changed + +- name: Create an availability set with default options + azure_rm_availabilityset: + name: myavailabilityset1 + resource_group: "{{ resource_group }}" + tags: + tag1: testtag + register: results + +- assert: + that: results.changed + +- name: Create proximity placement group name + set_fact: + ppgroup_name: "ppg{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create a proximity placement group + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + location: eastus + name: "{{ ppgroup_name }}" + state: present + register: results + +- name: Create an availability set with advanced options + azure_rm_availabilityset: + 
name: myavailabilityset2 + resource_group: "{{ resource_group }}" + platform_update_domain_count: 5 + platform_fault_domain_count: 2 + proximity_placement_group: "{{ ppgroup_name }}" + sku: Aligned + register: results + +- assert: + that: results.changed + +- name: Modify availabilty set immutable options - no changes, fail for immutable options + azure_rm_availabilityset: + name: myavailabilityset2 + resource_group: "{{ resource_group }}" + platform_update_domain_count: 2 + platform_fault_domain_count: 2 + proximity_placement_group: "{{ ppgroup_name }}" + sku: Aligned + register: results + ignore_errors: yes + +- assert: + that: + - not results.changed + - results.msg == 'You tried to change platform_update_domain_count but is was unsuccessful. An Availability Set is immutable, except tags' + +- name: Modify availabilty set immutable options and set tags - change tags and fail for immutable options + azure_rm_availabilityset: + name: myavailabilityset2 + resource_group: "{{ resource_group }}" + platform_update_domain_count: 2 + platform_fault_domain_count: 2 + proximity_placement_group: "{{ ppgroup_name }}" + sku: Aligned + tags: + test1: modified + register: results + ignore_errors: yes + +- assert: + that: + - not results.changed + - results.msg == 'You tried to change platform_update_domain_count but is was unsuccessful. 
An Availability Set is immutable, except tags' + +- name: Modify availabilty set options to update tags + azure_rm_availabilityset: + name: myavailabilityset2 + resource_group: "{{ resource_group }}" + platform_update_domain_count: 5 + platform_fault_domain_count: 2 + proximity_placement_group: "{{ ppgroup_name }}" + sku: Aligned + tags: + test2: modified + register: results + +- assert: + that: + - results.state.tags.test2 == 'modified' + +- name: Create availability set with incorrect fault domain parameter + azure_rm_availabilityset: + name: myavailabilityset3 + resource_group: "{{ resource_group }}" + platform_update_domain_count: 5 + platform_fault_domain_count: 4 + sku: Aligned + register: results + ignore_errors: yes + +- assert: + { that: "'The specified fault domain count 4 must fall in the range 1 to' in results['msg']" } + +- name: Test check_mode + azure_rm_availabilityset: + name: myavailabilityset2 + resource_group: "{{ resource_group }}" + platform_update_domain_count: 5 + platform_fault_domain_count: 2 + proximity_placement_group: "{{ ppgroup_name }}" + sku: Aligned + tags: + checktest1: modified1 + checktest2: modified2 + check_mode: yes + register: results + +- assert: + that: + - results.changed + - results.state.tags.checktest1 == 'modified1' + +# +# azure_rm_availabilityset_facts tests +# +- name: Get facts for created availability set + azure_rm_availabilityset_info: + name: myavailabilityset2 + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: + - not results.changed + - not results.failed + - results.ansible_info.azure_availabilitysets[0].properties.platformFaultDomainCount == 2 + - results.ansible_info.azure_availabilitysets[0].properties.platformUpdateDomainCount == 5 + - results.ansible_info.azure_availabilitysets[0].sku == 'Aligned' + - results.ansible_info.azure_availabilitysets[0].properties.proximityPlacementGroup.id.split('/')[-1] == ppgroup_name + +- name: Delete an availability set - Check Mode + 
azure_rm_availabilityset: + name: myavailabilityset1 + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + register: results +- assert: + that: + - results.changed + +- name: Delete an availability set + azure_rm_availabilityset: + name: myavailabilityset1 + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete an availability set already deleted - Check Mode + azure_rm_availabilityset: + name: myavailabilityset1 + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + register: results +- assert: + that: + - not results.changed + +- name: Delete an availability set + azure_rm_availabilityset: + name: myavailabilityset2 + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete an availability set + azure_rm_availabilityset: + name: myavailabilityset3 + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete proximity placement group + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + name: "{{ ppgroup_name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/aliases new file mode 100644 index 000000000..239e36577 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group4 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/tasks/main.yml new file mode 100644 index 000000000..face59c2b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_azurefirewall/tasks/main.yml @@ -0,0 +1,277 @@ +- name: Fix resource prefix + set_fact: + virtual_network_name: myVirtualNetwork + subnet_name: AzureFirewallSubnet + public_ipaddress_name: myPublicIpAddress + azure_firewall_name: myFirewall + +- name: Create virtual network + azure_rm_virtualnetwork: + name: "{{ virtual_network_name }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + +- name: Create subnet + azure_rm_subnet: + name: "{{ subnet_name }}" + virtual_network_name: "{{ virtual_network_name }}" + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/24" + +- name: Create public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: "{{ public_ipaddress_name }}" + sku: Standard + register: pip_output + +- debug: + var: pip_output + +- name: Create Azure Firewall + azure_rm_azurefirewall: + resource_group: '{{resource_group}}' + name: '{{azure_firewall_name}}' + #tags: + # key1: value1 + application_rule_collections: + - priority: 110 + action: deny + rules: + - name: rule1 + description: Deny inbound rule + source_addresses: + - 216.58.216.164 + - 10.0.0.0/25 + protocols: + - type: https + port: '443' + target_fqdns: + - www.test.com + name: apprulecoll + nat_rule_collections: + - priority: 112 + action: dnat + rules: + - name: DNAT-HTTPS-traffic + description: D-NAT all outbound web traffic for inspection + source_addresses: + - '*' + destination_addresses: + - "{{ pip_output.state.ip_address 
}}" + destination_ports: + - '443' + protocols: + - tcp + translated_address: 1.2.3.5 + translated_port: '8443' + name: natrulecoll + network_rule_collections: + - priority: 112 + action: deny + rules: + - name: L4-traffic + description: Block traffic based on source IPs and ports + protocols: + - tcp + source_addresses: + - 192.168.1.1-192.168.1.12 + - 10.1.4.12-10.1.4.255 + destination_addresses: + - '*' + destination_ports: + - 443-444 + - '8443' + name: netrulecoll + ip_configurations: + - subnet: + virtual_network_name: "{{ virtual_network_name }}" + name: "{{ subnet_name }}" + public_ip_address: + name: "{{ public_ipaddress_name }}" + name: azureFirewallIpConfiguration + register: output + +- debug: + var: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Create Azure Firewall -- idempotent + azure_rm_azurefirewall: + resource_group: '{{resource_group}}' + name: '{{azure_firewall_name}}' + application_rule_collections: + - priority: 110 + action: deny + rules: + - name: rule1 + description: Deny inbound rule + source_addresses: + - 216.58.216.164 + - 10.0.0.0/25 + protocols: + - type: https + port: '443' + target_fqdns: + - www.test.com + name: apprulecoll + nat_rule_collections: + - priority: 112 + action: dnat + rules: + - name: DNAT-HTTPS-traffic + description: D-NAT all outbound web traffic for inspection + source_addresses: + - '*' + destination_addresses: + - "{{ pip_output.state.ip_address }}" + destination_ports: + - '443' + protocols: + - tcp + translated_address: 1.2.3.5 + translated_port: '8443' + name: natrulecoll + network_rule_collections: + - priority: 112 + action: deny + rules: + - name: L4-traffic + description: Block traffic based on source IPs and ports + protocols: + - tcp + source_addresses: + - 192.168.1.1-192.168.1.12 + - 10.1.4.12-10.1.4.255 + destination_addresses: + - '*' + destination_ports: + - 443-444 + - '8443' + name: netrulecoll + ip_configurations: + - subnet: + 
virtual_network_name: "{{ virtual_network_name }}" + name: "{{ subnet_name }}" + public_ip_address: + name: "{{ public_ipaddress_name }}" + name: azureFirewallIpConfiguration + register: output + +- debug: + var: output + +- name: Assert that output has not changed + assert: + that: + - not output.changed + +- name: Create Azure Firewall -- change something + azure_rm_azurefirewall: + resource_group: '{{resource_group}}' + name: '{{azure_firewall_name}}' + application_rule_collections: + - priority: 110 + action: deny + rules: + - name: rule1 + description: Deny inbound rule + source_addresses: + - 216.58.216.165 + - 10.0.0.0/25 + protocols: + - type: https + port: '443' + target_fqdns: + - www.test.com + name: apprulecoll + nat_rule_collections: + - priority: 112 + action: dnat + rules: + - name: DNAT-HTTPS-traffic + description: D-NAT all outbound web traffic for inspection + source_addresses: + - '*' + destination_addresses: + - "{{ pip_output.state.ip_address }}" + destination_ports: + - '443' + protocols: + - tcp + translated_address: 1.2.3.6 + translated_port: '8443' + name: natrulecoll + network_rule_collections: + - priority: 112 + action: deny + rules: + - name: L4-traffic + description: Block traffic based on source IPs and ports + protocols: + - tcp + source_addresses: + - 192.168.1.1-192.168.1.12 + - 10.1.4.12-10.1.4.255 + destination_addresses: + - '*' + destination_ports: + - 443-445 + - '8443' + name: netrulecoll + ip_configurations: + - subnet: + virtual_network_name: "{{ virtual_network_name }}" + name: "{{ subnet_name }}" + public_ip_address: + name: "{{ public_ipaddress_name }}" + name: azureFirewallIpConfiguration + check_mode: yes + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Get info of the Azure Firewall + azure_rm_azurefirewall_info: + resource_group: '{{resource_group}}' + name: '{{azure_firewall_name}}' + register: output + +- assert: + that: + - not output.changed + - 
output.firewalls['id'] != None + - output.firewalls['name'] != None + - output.firewalls['location'] != None + - output.firewalls['etag'] != None + - output.firewalls['nat_rule_collections'] != None + - output.firewalls['network_rule_collections'] != None + - output.firewalls['ip_configurations'] != None + - output.firewalls['provisioning_state'] != None + +- name: Delete Azure Firewall + azure_rm_azurefirewall: + resource_group: '{{resource_group}}' + name: '{{azure_firewall_name}}' + state: absent + register: output + +- assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/aliases new file mode 100644 index 000000000..6c4c0f4ca --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group12 +destructive +disabled \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/meta/main.yml new file mode 100644 index 000000000..48f5726d8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/tasks/main.yml new file mode 100644 index 000000000..b8d0064f2 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backupazurevm/tasks/main.yml @@ -0,0 +1,76 @@ +- name: Fix resource prefix + set_fact: + resource_group: 'MyResourceGroup' + 
recovery_vault_name: 'MyRecoveryVault' + resource_id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/MyVM" + backup_policy_id: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/microsoft.recoveryservices/vaults/testVault/backupPolicies/DefaultPolicy" + +- name: Enabling/Updating protection for the Azure VM + azure_rm_backupazurevm: + resource_group: "{{ resource_group }}" + recovery_vault_name: "{{ recovery_vault_name }}" + resource_id: "{{ resource_id }}" + backup_policy_id: "{{ backup_policy_id }}" + state: "create" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Trigger an on-demand backup for a protected Azure VM + azure_rm_backupazurevm: + resource_group: "{{ resource_group }}" + recovery_vault_name: "{{ recovery_vault_name }}" + resource_id: "{{ resource_id }}" + backup_policy_id: "{{ backup_policy_id }}" + recovery_point_expiry_time: "2025-02-03T05:00:00Z" + state: "backup" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Stop protection but retain existing data + azure_rm_backupazurevm: + resource_group: "{{ resource_group }}" + recovery_vault_name: "{{ recovery_vault_name }}" + resource_id: "{{ resource_id }}" + backup_policy_id: "{{ backup_policy_id }}" + state: "stop" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Get backup azure vm info + azure_rm_backupazurevm_info: + resource_group: "{{ resource_group }}" + recovery_vault_name: "{{ recovery_vault_name }}" + resource_id: "{{ resource_id }}" + register: output + +- name: Assert that output has changed + assert: + that: + - output.response.id != None + - output.response.name != None + +- name: Stop protection and delete data + azure_rm_backupazurevm: + resource_group: "{{ resource_group }}" + 
recovery_vault_name: "{{ recovery_vault_name }}" + resource_id: "{{ resource_id }}" + backup_policy_id: "{{ backup_policy_id }}" + state: "delete" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/tasks/main.yml new file mode 100644 index 000000000..f57eed4a8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_backuppolicy/tasks/main.yml @@ -0,0 +1,168 @@ +- name: Set Facts for Tests + set_fact: + vault_name: "rsv{{ resource_group | hash('md5') | truncate(22, True, '') }}" + location: "eastus" + policy_name_daily: "bp-daily-policy-{{ resource_group | hash('md5') | truncate(22, True, '') }}" + policy_name_weekly: "bp-weekly-policy-{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create Azure Recovery Service vault + azure_rm_recoveryservicesvault: + resource_group: "{{ resource_group }}" + name: "{{ 
vault_name }}" + location: "{{ location }}" + state: "present" + +- name: Create a daily VM backup policy + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_daily }}" + resource_group: "{{ resource_group }}" + state: present + backup_management_type: "AzureIaasVM" + schedule_run_frequency: "Daily" + instant_recovery_snapshot_retention: 2 + daily_retention_count: 12 + time_zone: "Pacific Standard Time" + schedule_run_time: 14 + register: daily_policy_output + +- name: Create a daily VM backup policy + azure_rm_backuppolicy_info: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_daily }}" + resource_group: "{{ resource_group }}" + register: backup_policy_exists + +- name: Assert success on daily backup policy creation + assert: + that: + - daily_policy_output.changed + - daily_policy_output.name == policy_name_daily + +- name: Assert Policy Success Retrieving Info + assert: + that: + - backup_policy_exists.id == daily_policy_output.id + - backup_policy_exists.location == daily_policy_output.location + - backup_policy_exists.name == daily_policy_output.name + - backup_policy_exists.type == daily_policy_output.type + +- name: Update daily VM backup policy + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_daily }}" + resource_group: "{{ resource_group }}" + state: present + backup_management_type: "AzureIaasVM" + schedule_run_frequency: "Daily" + instant_recovery_snapshot_retention: 5 + daily_retention_count: 10 + time_zone: "Pacific Standard Time" + schedule_run_time: 10 + register: daily_policy_output_update + +- name: Assert success on update of daily policy + assert: + that: + - daily_policy_output_update.changed + - daily_policy_output_update.name == policy_name_daily + +- name: Create a weekly VM backup policy + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_weekly }}" + resource_group: "{{ resource_group }}" + state: present + backup_management_type: 
"AzureIaasVM" + schedule_run_frequency: "Weekly" + instant_recovery_snapshot_retention: 5 + weekly_retention_count: 4 + schedule_days: + - "Monday" + - "Wednesday" + - "Friday" + time_zone: "Pacific Standard Time" + schedule_run_time: 8 + register: weekly_policy_output + +- name: Assert success on weekly backup policy creation + assert: + that: + - weekly_policy_output.changed + - weekly_policy_output.name == policy_name_weekly + +- name: Update weekly VM backup policy + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_weekly }}" + resource_group: "{{ resource_group }}" + state: present + backup_management_type: "AzureIaasVM" + schedule_run_frequency: "Weekly" + instant_recovery_snapshot_retention: 5 + weekly_retention_count: 4 + schedule_days: + - "Monday" + - "Wednesday" + - "Thursday" + time_zone: "Pacific Standard Time" + schedule_run_time: 10 + register: weekly_policy_output_update + +- name: Assert success on update of weekly policy + assert: + that: + - weekly_policy_output_update.changed + - weekly_policy_output_update.name == policy_name_weekly + +- name: Delete a daily VM backup policy + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_daily }}" + resource_group: "{{ resource_group }}" + state: absent + register: daily_policy_output_delete + +- name: Assert success on daily backup policy deletion + assert: + that: + - daily_policy_output_delete.changed + +- name: Delete a weekly VM backup policy + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_weekly }}" + resource_group: "{{ resource_group }}" + state: absent + register: weekly_policy_output_delete + +- name: Assert success on weekly backup policy deletion + assert: + that: + - weekly_policy_output_delete.changed + +- name: Delete a daily VM backup policy (idempotent) + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_daily }}" + resource_group: "{{ resource_group }}" + state: 
absent + register: daily_policy_output_delete_idempotent + +- name: Assert that there is no change after second deletion of daily backup policy + assert: + that: + - not daily_policy_output_delete_idempotent.changed + +- name: Delete a weekly VM backup policy (idempotent) + azure_rm_backuppolicy: + vault_name: "{{ vault_name }}" + name: "{{ policy_name_weekly }}" + resource_group: "{{ resource_group }}" + state: absent + register: weekly_policy_output_delete_idempotent + +- name: Assert that there is no change after second deletion of weekly backup policy + assert: + that: + - not weekly_policy_output_delete_idempotent.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/aliases new file mode 100644 index 000000000..3c63edec5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group15 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/tasks/main.yml new file mode 100644 index 000000000..c9d9c23d7 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_bastionhost/tasks/main.yml @@ -0,0 +1,173 @@ +- name: Set Bastion host name + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(8, 
True, '') }}" + +- name: create resource group + azure_rm_resourcegroup: + name: "{{ resource_group }}" + location: eastus + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "vnet{{ rpfx }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + dns_servers: + - 127.0.0.1 + +- name: Create a subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: AzureBastionSubnet + virtual_network_name: "vnet{{ rpfx }}" + address_prefix_cidr: "10.1.0.0/26" + register: subnet_output + +- name: Create public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + allocation_method: Static + sku: Standard + +- name: Get public ip ID + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + register: publicIP_output + +- name: Create bastion host (checkmode test) + azure_rm_bastionhost: + resource_group: "{{ resource_group }}" + name: "bh{{ rpfx }}" + ip_configurations: + - name: testip_configuration + subnet: + id: "{{ subnet_output.state.id }}" + public_ip_address: + id: "{{ publicIP_output.publicipaddresses[0].id }}" + private_ip_allocation_method: Dynamic + sku: + name: Standard + enable_tunneling: False + enable_shareable_link: False + enable_ip_connect: False + enable_file_copy: False + scale_units: 6 + disable_copy_paste: False + tags: + key3: value3 + check_mode: yes + register: output + +- name: Create bastion host + azure_rm_bastionhost: + resource_group: "{{ resource_group }}" + name: "bh{{ rpfx }}" + ip_configurations: + - name: testip_configuration + subnet: + id: "{{ subnet_output.state.id }}" + public_ip_address: + id: "{{ publicIP_output.publicipaddresses[0].id }}" + private_ip_allocation_method: Dynamic + sku: + name: Standard + enable_tunneling: False + enable_shareable_link: False + enable_ip_connect: False + enable_file_copy: False + scale_units: 6 + disable_copy_paste: False + tags: + key3: value3 
+ register: output + +- name: Assert the bastion host created + assert: + that: + - output.changed + +- name: Create bastion host (Idempotent Test) + azure_rm_bastionhost: + resource_group: "{{ resource_group }}" + name: "bh{{ rpfx }}" + ip_configurations: + - name: testip_configuration + subnet: + id: "{{ subnet_output.state.id }}" + public_ip_address: + id: "{{ publicIP_output.publicipaddresses[0].id }}" + private_ip_allocation_method: Dynamic + sku: + name: Standard + enable_tunneling: False + enable_shareable_link: False + enable_ip_connect: False + enable_file_copy: False + scale_units: 6 + disable_copy_paste: False + tags: + key3: value3 + register: output + +- name: Assert the bastion host no changed + assert: + that: + - not output.changed + +- name: Pause for 20 mimutes to Bastion host updating + shell: sleep 1200 + +- name: Update bastion host + azure_rm_bastionhost: + resource_group: "{{ resource_group }}" + name: "bh{{ rpfx }}" + ip_configurations: + - name: testip_configuration + subnet: + id: "{{ subnet_output.state.id }}" + public_ip_address: + id: "{{ publicIP_output.publicipaddresses[0].id }}" + private_ip_allocation_method: Dynamic + sku: + name: Basic + enable_tunneling: True + enable_shareable_link: True + enable_ip_connect: True + enable_file_copy: True + scale_units: 8 + disable_copy_paste: True + tags: + key2: value2 + register: output + +- name: Assert the bastion host updated + assert: + that: + - output.changed + +- name: Get bastion host info + azure_rm_bastionhost_info: + resource_group: "{{ resource_group }}" + name: "bh{{ rpfx }}" + register: output + +- name: Assert the bastion host is well create + assert: + that: + - output.bastion_host[0].disable_copy_paste == true + - output.bastion_host[0].enable_file_copy == true + - output.bastion_host[0].enable_ip_connect == true + - output.bastion_host[0].enable_shareable_link == true + - output.bastion_host[0].enable_tunneling == true + - output.bastion_host[0].scale_units == 8 + - 
output.bastion_host[0].sku.name == 'Basic' + +- name: Delete bastion host + azure_rm_bastionhost: + resource_group: "{{ resource_group }}" + name: "bh{{ rpfx }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/aliases new file mode 100644 index 000000000..bbfe7e8cd --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group12 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/tasks/main.yml new file mode 100644 index 000000000..e62cb67cf --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_batchaccount/tasks/main.yml @@ -0,0 +1,76 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. 
+# +# +# ---------------------------------------------------------------------------- +- name: Prepare random number + set_fact: + storage_account_name: "st{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + batch_account_name: "ba{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create Storage Account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name }}" + location: eastus + account_type: Standard_LRS + +- name: Create Batch Account + azure_rm_batchaccount: + resource_group: "{{ resource_group }}" + name: "{{ batch_account_name }}" + location: eastus + auto_storage_account: + name: "{{ storage_account_name }}" + pool_allocation_mode: batch_service + register: output + +- name: Assert the resource was created + assert: + that: + - output.changed + +- name: Create Batch Account -- idempotent + azure_rm_batchaccount: + resource_group: "{{ resource_group }}" + name: "{{ batch_account_name }}" + location: eastus + auto_storage_account: + name: "{{ storage_account_name }}" + pool_allocation_mode: batch_service + register: output + +- name: Assert the resource was created + assert: + that: + - not output.changed + +- name: Delete Batch Account + azure_rm_batchaccount: + resource_group: "{{ resource_group }}" + name: "{{ batch_account_name }}" + location: eastus + auto_storage_account: + name: "{{ storage_account_name }}" + pool_allocation_mode: batch_service + state: absent + register: output + +- name: Assert that state has changed + assert: + that: + - output.changed + +- name: Clean up storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/aliases new file mode 
100644 index 000000000..42643220a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group13 +destructive +azure_rm_cdnprofile_info +azure_rm_cdnendpoint diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/tasks/main.yml new file mode 100644 index 000000000..657881a50 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cdnprofile/tasks/main.yml @@ -0,0 +1,276 @@ +- name: Prepare random number + set_fact: + cdnprofilename: "cdnprofile{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + endpointname: "endpoint{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + + +- name: Create a CDN profile(check mode) + azure_rm_cdnprofile: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + sku: standard_akamai + tags: + testing: testing + delete: on-exit + foo: bar + check_mode: yes + +- name: Check there is no CDN profile created + azure_rm_cdnprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + register: fact + +- name: Check there is no CDN profile created + assert: { that: "{{ fact.cdnprofiles | length }} == 0" } + +- name: Create a CDN profile + azure_rm_cdnprofile: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" 
+ sku: standard_akamai + tags: + testing: testing + delete: on-exit + foo: bar + register: output + +- name: Assert the CDN profile is well created + assert: + that: + - output.changed + - output.id != '' + +- name: Gather CDN profile facts + azure_rm_cdnprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + register: fact + +- name: Assert fact returns the created one + assert: + that: + - "fact.cdnprofiles | length == 1" + - fact.cdnprofiles[0].sku == 'Standard_Akamai' + - fact.cdnprofiles[0].tags.foo == 'bar' + +- name: Create a CDN profile (idempotent) + azure_rm_cdnprofile: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + sku: standard_akamai + tags: + testing: testing + delete: on-exit + foo: bar + register: output + +- name: Assert idempotent + assert: + that: + - not output.changed + +- name: Update the CDN profile + azure_rm_cdnprofile: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + sku: standard_akamai + tags: + testing: testing + delete: on-exit + foo: bar + baz: qux + register: output + +- name: Assert the CDN profile is updated + assert: + that: + - output.changed + +- name: Delete the CDN profile(check mode) + azure_rm_cdnprofile: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + state: absent + check_mode: yes + +- name: Gather CDN profile facts + azure_rm_cdnprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + register: fact + +- name: Assert the CDN is still there + assert: + that: + - "fact.cdnprofiles | length == 1" + - fact.cdnprofiles[0].sku == 'Standard_Akamai' + - fact.cdnprofiles[0].tags.foo == 'bar' + - fact.cdnprofiles[0].tags.baz == 'qux' + +- name: Create a Azure CDN endpoint(check mode) + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + origins: + - name: "org{{ endpointname }}" + host_name: "www.google.com" + 
tags: + testing: testing + delete: on-exit + foo: bar + check_mode: yes + +- name: Create a Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + origins: + - name: "org{{ endpointname }}" + host_name: "www.google.com" + tags: + testing: testing + delete: on-exit + foo: bar + register: output + +- name: Assert the Azure CDN endpoint is well created + assert: + that: + - output.changed + - output.id + +- name: Get facts of a Azure CDN endpoint + azure_rm_cdnendpoint_info: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + register: facts + +- name: Assert facts output + assert: + that: + - facts['cdnendpoints'] | length == 1 + - facts['cdnendpoints'][0]['id'] + - facts['cdnendpoints'][0]['name'] + - facts['cdnendpoints'][0]['profile_name'] + - facts['cdnendpoints'][0]['origin'] + - facts['cdnendpoints'][0]['location'] + - facts['cdnendpoints'][0]['provisioning_state'] + - facts['cdnendpoints'][0]['resource_state'] + +- name: Create a Azure CDN endpoint(idempotent) + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + origins: + - name: "org{{ endpointname }}" + host_name: "www.google.com" + tags: + testing: testing + delete: on-exit + foo: bar + register: output + +- name: Assert idempotent + assert: + that: + - not output.changed + +- name: Stop a Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + started: False + register: output + +- name: Assert stopped + assert: + that: + - output.changed + +- name: Stop a Azure CDN endpoint(idempotent) + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + started: False + register: output + +- name: Assert 
still stopped and not changed + assert: + that: + - not output.changed + +- name: Start a Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + started: True + register: output + +- name: Assert started + assert: + that: + - output.changed + +- name: Update the Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + origin_path: /test/ + tags: + testing: testing + delete: on-exit + foo: baz + register: output + +- name: Assert the Azure CDN endpoint is updated + assert: + that: + - output.changed + +- name: Delete a Azure CDN endpoint(check mode) + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + state: absent + check_mode: yes + +- name: Delete a Azure CDN endpoint + azure_rm_cdnendpoint: + resource_group: "{{ resource_group }}" + name: "{{ endpointname }}" + profile_name: "{{ cdnprofilename }}" + state: absent + +- name: Delete the CDN profile + azure_rm_cdnprofile: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + state: absent + register: output + +- name: Assert the CDN profile is well deleted + assert: + that: + - output.changed + +- name: Get CDN profile fact + azure_rm_cdnprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ cdnprofilename }}" + register: fact + +- name: Assert fact returns empty + assert: + that: + - "fact.cdnprofiles | length == 0" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/aliases @@ -0,0 +1,3 @@ 
+cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml new file mode 100644 index 000000000..fefad59a8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml @@ -0,0 +1,194 @@ +- name: Create Azure Search name + set_fact: + search_name: "search{{ resource_group | hash('md5') | truncate(16, True, '') }}" + search_name_secondary: "search{{ resource_group | hash('md5') | truncate(16, True, '') }}-secondary" + +- name: Create invalid Azure Search - Hosting Name + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + hosting_mode: highDensity + register: invalid_hosting_name + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - invalid_hosting_name.failed == True + +- name: Create invalid Azure Search - Partition Count High Density + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + sku: standard3 + hosting_mode: highDensity + partition_count: 4 + register: invalid_partition_count + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - invalid_partition_count.failed == True + +- name: Create invalid Azure Search - Partition Count + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + 
partition_count: 7 + register: invalid_partition_count + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - invalid_partition_count.failed == True + +- name: Create invalid Azure Search - Replica Count + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + sku: standard + replica_count: 13 + register: invalid_replica_count + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - invalid_replica_count.failed == True + +- name: Create invalid Azure Search - Replica Count SKU basic + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + replica_count: 4 + sku: basic + register: invalid_replica_count + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - invalid_replica_count.failed == True + +- name: Create basic Azure Search + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + register: search_info + +- name: Assert status succeeded and results + assert: + that: + - search_info.changed + - search_info.state.id is defined + - search_info.state.identity.type == "None" + - search_info.state.identity.principal_id is not defined + - search_info.state.hosting_mode == "default" + - search_info.state.name == search_name + - search_info.state.partition_count == 1 + - search_info.state.replica_count == 1 + - search_info.state.sku == "basic" + - search_info.state.provisioning_state == "succeeded" + - search_info.state.public_network_access == "Enabled" + - search_info.state.status == "running" + +- name: Get info for Azure Search + azure_rm_cognitivesearch_info: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + register: search_info_module + +- name: Assert status succeeded and results + assert: + that: + - search_info_module.search.id is defined + - search_info_module.search.identity.type == "None" + - search_info_module.search.identity.principal_id is not defined + - 
search_info_module.search.hosting_mode == "default" + - search_info_module.search.name == search_name + - search_info_module.search.partition_count == 1 + - search_info_module.search.replica_count == 1 + - search_info_module.search.sku == "basic" + - search_info_module.search.provisioning_state == "succeeded" + - search_info_module.search.public_network_access == "Enabled" + - search_info_module.search.status == "running" + +- name: Update basic Azure Search (Idempotence) + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + register: search_info + +- name: Assert that idempotence is ok + assert: + that: + - not search_info.changed + +- name: Delete Azure Search + azure_rm_cognitivesearch: + name: "{{ search_name }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Create Azure Search '{{ search_name }}' not default parameters + azure_rm_cognitivesearch: + name: "{{ search_name_secondary }}" + resource_group: "{{ resource_group }}" + hosting_mode: default + identity: SystemAssigned + network_rule_set: + - 8.8.8.8/31 + - 1.1.1.1 + partition_count: 2 + public_network_access: disabled + replica_count: 2 + sku: standard + tags: + foo: bar + register: search_info + +- name: Assert status succeeded and results + assert: + that: + - search_info.changed + - search_info.state.id is defined + - search_info.state.identity.type == "SystemAssigned" + - search_info.state.identity.principal_id is defined + - search_info.state.hosting_mode == "default" + - search_info.state.name == search_name_secondary + - search_info.state.partition_count == 2 + - search_info.state.replica_count == 2 + - search_info.state.sku == "standard" + - search_info.state.provisioning_state == "succeeded" + - search_info.state.status == "running" + +- name: Update Azure Search '{{ search_name }}' not default parameters (Idempotence) + azure_rm_cognitivesearch: + name: "{{ search_name_secondary }}" + resource_group: "{{ resource_group }}" + 
hosting_mode: default + identity: SystemAssigned + network_rule_set: + - 8.8.8.8/31 + - 1.1.1.1 + partition_count: 2 + public_network_access: disabled + replica_count: 2 + sku: standard + tags: + foo: bar + register: search_info + +- name: Assert that idempotence is ok + assert: + that: + - not search_info.changed + +- name: Delete Azure Search + azure_rm_cognitivesearch: + name: "{{ search_name_secondary }}" + resource_group: "{{ resource_group }}" + state: absent + diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/aliases new file mode 100644 index 000000000..93066dcc2 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/aliases @@ -0,0 +1,4 @@ +cloud/azure +destructive +shippable/azure/group2 +azure_rm_containerinstance_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/tasks/main.yml new file mode 100644 index 000000000..44bb852a5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerinstance/tasks/main.yml @@ -0,0 +1,356 @@ +- name: Set Container Instance Names + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Create sample container 
instance + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + register: output + +- debug: + var: output + +- name: Assert the container instance is well created + assert: + that: + - output.changed + - output.provisioning_state == 'Succeeded' + +- name: Create sample container instance -- same parameters + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + register: output + +- name: Assert the container instance is well created + assert: + that: + - output.changed == False + +- name: Create sample container instance -- force update + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + force_update: yes + register: output + +- name: Assert the container instance is well created + assert: + that: + - output.changed + - output.provisioning_state == 'Succeeded' + +- name: Create second container instance for testing purposes + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}sec" + os_type: linux + ip_address: public + dns_name_label: mydnslabel{{ resource_group | hash('md5') | truncate(7, True, '') }} + location: eastus + restart_policy: on_failure + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + environment_variables: + - name: myvar + value: myvarvalue + register: output + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ 
resource_group }}" + name: "vnet{{ rpfx }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + +- name: Create a subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "sub{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + address_prefix_cidr: "10.1.0.0/24" + private_link_service_network_policies: Disabled + private_endpoint_network_policies: Disabled + delegations: + - name: delegation_to_containerinsance + serviceName: 'Microsoft.ContainerInstance/containerGroups' + register: subnet_output + +- name: Create third container instance for subnet_ids + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}thi" + os_type: linux + ip_address: private + location: eastus + subnet_ids: + - "{{ subnet_output.state.id }}" + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + register: output + +- name: Assert the container instance is well created + assert: + that: + - output.changed + +- name: Gather facts for single Container Instance + azure_rm_containerinstance_info: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}sec" + register: output + +- debug: + var: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.containerinstances[0]['resource_group'] != None + - output.containerinstances[0]['name'] != None + - output.containerinstances[0]['os_type'] != None + - output.containerinstances[0]['location'] != None + - output.containerinstances[0]['ip_address'] != None + - output.containerinstances[0]['ports'] != None + - output.containerinstances[0]['containers'] != None + - output.containerinstances[0]['containers'][0]['environment_variables'] | length == 1 + - output.containerinstances[0]['restart_policy'] == 'on_failure' + +- name: Gather facts for all Container Instances in the resource group + azure_rm_containerinstance_info: + resource_group: "{{ resource_group }}" + register: 
output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.containerinstances[0]['resource_group'] != None + - output.containerinstances[0]['name'] != None + - output.containerinstances[0]['os_type'] != None + - output.containerinstances[0]['location'] != None + - output.containerinstances[0]['ip_address'] != None + - output.containerinstances[0]['ports'] != None + - output.containerinstances[0]['containers'] != None + - output.containerinstances[1]['resource_group'] != None + - output.containerinstances[1]['name'] != None + - output.containerinstances[1]['os_type'] != None + - output.containerinstances[1]['location'] != None + - output.containerinstances[1]['ip_address'] != None + - output.containerinstances[1]['ports'] != None + - output.containerinstances[1]['containers'] != None + +- name: Create sample container instance with volume + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}thi" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: mcr.microsoft.com/azuredocs/aci-helloworld + memory: 1.5 + volume_mounts: + - name: "my-filesharevolume" + mount_path: "/data/files" + ports: + - 80 + - 81 + volumes: + - name: "my-filesharevolume" + azure_file: + storage_account_name: "{{ storage_account_name }}" + share_name: "{{ file_share_name }}" + storage_account_key: "{{ storage_account_key }}" + register: output + ignore_errors: True + +- debug: + var: output + ignore_errors: True + +- name: Assert the container instance is well created + assert: + that: + - output.changed + - output.provisioning_state == 'Succeeded' + ignore_errors: True + +- name: Gather facts for Container Instance + azure_rm_containerinstance_info: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}thi" + register: output + ignore_errors: True + +- debug: + var: output + ignore_errors: True + +- name: Assert that facts are returned 
+ assert: + that: + - output.changed == False + - output.containerinstances[0]['resource_group'] != None + - output.containerinstances[0]['name'] != None + - output.containerinstances[0]['os_type'] != None + - output.containerinstances[0]['location'] != None + - output.containerinstances[0]['ip_address'] != None + - output.containerinstances[0]['volumes'] != None + - output.containerinstances[0]['ports'] != None + - output.containerinstances[0]['containers'] != None + - output.containerinstances[0]['containers'][0]['volume_mounts'] | length == 1 + - output.containerinstances[0]['containers'][0]['volume_mounts'][0]['name'] != None + - output.containerinstances[0]['containers'][0]['volume_mounts'][0]['mount_path'] != None + ignore_errors: True + +- name: Remove container instance + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}thi" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: mcr.microsoft.com/azuredocs/aci-helloworld + memory: 1.5 + volume_mounts: + - name: "my-filesharevolume" + mount_path: "/data/files" + ports: + - 80 + - 81 + volumes: + - name: "my-filesharevolume" + azure_file: + storage_account_name: "{{ storage_account_name }}" + share_name: "{{ file_share_name }}" + storage_account_key: "{{ storage_account_key }}" + state: absent + register: output + ignore_errors: True + +- name: Remove container instance + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + state: absent + register: output + +- name: Assert the container instance is deleted + assert: + that: + - output.changed + +- name: Remove container instance + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}sec" + os_type: linux + 
ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + state: absent + +- name: Remove container instance again + azure_rm_containerinstance: + resource_group: "{{ resource_group }}" + name: "aci{{ rpfx }}" + os_type: linux + ip_address: public + location: eastus + ports: + - 80 + containers: + - name: mycontainer1 + image: httpd + memory: 1.5 + ports: + - 80 + - 81 + state: absent + register: output + +- name: Assert the changed is false + assert: + that: + - output.changed == False diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/aliases new file mode 100644 index 000000000..2615d3fe0 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group2 +destructive +azure_rm_containerregistry_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/tasks/main.yml new file mode 100644 index 000000000..7c83c5c5d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistry/tasks/main.yml @@ -0,0 +1,116 @@ + - name: Create an container registry + azure_rm_containerregistry: 
+ name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus2 + admin_user_enabled: true + sku: Premium + tags: + Release: beta1 + Environment: Production + register: output + + - name: Assert the container registry instance is well created + assert: + that: + - output.changed + - output.admin_user_enabled + - output.location == 'eastus2' + - output.sku == 'Premium' + - output.tags['Environment'] == 'Production' + - output.tags['Release'] == 'beta1' + - output.provisioning_state == 'Succeeded' + - output.credentials['password'] is defined + - output.credentials['password2'] is defined + + - name: Update the ACR instance sku, tags and admin_user_enabled + azure_rm_containerregistry: + name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + location: eastus2 + admin_user_enabled: false + sku: Standard + tags: + NewTag: newtag + Release: beta1 + Environment: Production + register: output + + - name: Create second container registry (to test facts) + azure_rm_containerregistry: + name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}sec" + resource_group: "{{ resource_group }}" + location: eastus2 + admin_user_enabled: false + sku: Premium + tags: + Release: beta1 + Environment: Production + + - name: Assert the ACR instance is well updated + assert: + that: + - output.changed == True + - output.admin_user_enabled == False + - output.sku == 'Standard' + - output.tags['NewTag'] == 'newtag' + - output.credentials | length == 0 + - output.credentials['password'] is not defined + - output.credentials['password2'] is not defined + + - name: Gather facts for single Container Registry + azure_rm_containerregistry_info: + resource_group: "{{ resource_group }}" + name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}" + register: output + + - name: Assert that facts are returned + assert: + that: + - output.changed == False 
+ - output.registries[0]['name'] != None + - output.registries[0]['location'] != None + - output.registries[0]['admin_user_enabled'] != None + - output.registries[0]['sku'] != None + - output.registries[0]['provisioning_state'] != None + - output.registries[0]['login_server'] != None + - output.registries[0]['id'] != None + - output.registries[0]['credentials'] != None + + - name: Gather facts for all Container Registries in the resource group + azure_rm_containerregistry_info: + resource_group: "{{ resource_group }}" + register: output + + - name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.registries[0]['name'] != None + - output.registries[0]['location'] != None + - output.registries[0]['admin_user_enabled'] != None + - output.registries[0]['sku'] != None + - output.registries[0]['provisioning_state'] != None + - output.registries[0]['login_server'] != None + - output.registries[0]['id'] != None + - output.registries[0]['credentials'] != None + - output.registries[1]['name'] != None + - output.registries[1]['location'] != None + - output.registries[1]['admin_user_enabled'] != None + - output.registries[1]['sku'] != None + - output.registries[1]['provisioning_state'] != None + - output.registries[1]['login_server'] != None + - output.registries[1]['id'] != None + - output.registries[1]['credentials'] != None + + - name: Delete first container registry + azure_rm_containerregistry: + name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}" + resource_group: "{{ resource_group }}" + state: absent + + - name: Delete second container registry + azure_rm_containerregistry: + name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}sec" + resource_group: "{{ resource_group }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/tasks/main.yml new file mode 100644 index 000000000..6f90bcddf --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_containerregistrytag/tasks/main.yml @@ -0,0 +1,366 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: true + +- name: Create an container registry + azure_rm_containerregistry: + name: "acr{{ rpfx }}" + resource_group: "{{ resource_group }}" + location: eastus2 + admin_user_enabled: true + sku: Standard + +- name: Load all tags + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + register: output +- name: Verify no tags exist + assert: + that: output.repositories | length == 0 + +- name: Load all tags for non-existant repository + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + repository_name: "does-not-exist" + register: output +- name: Verify no tags exist + assert: + that: 
output.repositories | length == 0 + +- name: Import tag (check mode) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app1" + name: "v1" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + check_mode: true + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import tag (actually import) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app1" + name: "v1" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import tag (test idempotency) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app1" + name: "v1" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: not output.changed + +- name: Import additional tag + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "1.1.1" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import additional tag + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "test-image" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import additional tag + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "v1" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Load all tags + azure_rm_containerregistrytag_info: + registry: "acr{{ 
rpfx }}" + register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 2 + - output.repositories[0].name == 'app1' + - output.repositories[0].tags | length == 1 + - output.repositories[0].tags[0].name == 'v1' + - output.repositories[1].name == 'app2' + - output.repositories[1].tags | length == 3 + - output.repositories[1].tags[0].name == '1.1.1' + - output.repositories[1].tags[1].name == 'test-image' + - output.repositories[1].tags[2].name == 'v1' + +- name: Load tags by repository + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + repository_name: "app2" + register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 1 + - output.repositories[0].name == 'app2' + - output.repositories[0].tags | length == 3 + - output.repositories[0].tags[0].name == '1.1.1' + - output.repositories[0].tags[1].name == 'test-image' + - output.repositories[0].tags[2].name == 'v1' + +- name: Load tags by repository and name + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "test-image" + register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 1 + - output.repositories[0].name == 'app2' + - output.repositories[0].tags | length == 1 + - output.repositories[0].tags[0].name == 'test-image' + +- name: Load tags by name + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + name: "v1" + register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 2 + - output.repositories[0].name == 'app1' + - output.repositories[0].tags | length == 1 + - output.repositories[0].tags[0].name == 'v1' + - output.repositories[1].name == 'app2' + - output.repositories[1].tags | length == 1 + - output.repositories[1].tags[0].name == 'v1' + +- name: Delete tag by name (check mode) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "test-image" + 
state: "absent" + check_mode: true + register: output +- name: Assert output + assert: + that: output.changed + +- name: Delete tag by name (actually delete) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "test-image" + state: "absent" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Delete tag by name (test idempotency) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + name: "test-image" + state: "absent" + register: output +- name: Assert output + assert: + that: not output.changed + +- name: Load tags by repository + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + repository_name: "app2" + register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 1 + - output.repositories[0].name == 'app2' + - output.repositories[0].tags | length == 2 + - output.repositories[0].tags[0].name == '1.1.1' + - output.repositories[0].tags[1].name == 'v1' + +- name: Delete repository (check mode) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + state: "absent" + check_mode: true + register: output +- name: Assert output + assert: + that: output.changed + +- name: Delete repository (actually delete) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + state: "absent" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Delete repository (test idempotency) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: "app2" + state: "absent" + register: output +- name: Assert output + assert: + that: not output.changed + +- name: Load all tags + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 1 + - output.repositories[0].name == 'app1' + - output.repositories[0].tags | 
length == 1 + - output.repositories[0].tags[0].name == 'v1' + +- name: Import tag with same name + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import tag with same name (test idempotency) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: not output.changed + +- name: Import tag with different repo, same name + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: app1 + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import tag with different repo, same name (test idempotency) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + repository_name: app1 + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: not output.changed + +- name: Import tag with different name, same repo + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + name: "myversion" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: output.changed + +- name: Import tag with different name, same repo (test idempotency) + azure_rm_containerregistrytag: + registry: "acr{{ rpfx }}" + name: "myversion" + source_image: + registry_uri: "docker.io" + repository: "library/hello-world" + name: "latest" + register: output +- name: Assert output + assert: + that: not output.changed + +- name: Load all tags + azure_rm_containerregistrytag_info: + registry: "acr{{ rpfx }}" + 
register: output +- name: Assert tags exist + assert: + that: + - output.repositories | length == 2 + - output.repositories[0].name == 'app1' + - output.repositories[0].tags | length == 2 + - output.repositories[0].tags[0].name == 'latest' + - output.repositories[0].tags[1].name == 'v1' + - output.repositories[1].name == 'library/hello-world' + - output.repositories[1].tags | length == 2 + - output.repositories[1].tags[0].name == 'latest' + - output.repositories[1].tags[1].name == 'myversion' + +- name: Delete container registry + azure_rm_containerregistry: + name: "acr{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/aliases new file mode 100644 index 000000000..8f0d8a0e4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/aliases @@ -0,0 +1,4 @@ +cloud/azure +destructive +shippable/azure/group14 +azure_rm_cosmosdbaccount_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml new file mode 100644 index 000000000..ca1cc8b90 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml @@ -0,0 +1,365 @@ +- name: Prepare 
random number + set_fact: + dbname: "cosmos-{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + db2name: "cosmos2-{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + vnname: "vn{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + subnetname: "subnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + free_tier_supported: false # https://github.com/ansible-collections/azure/pull/675#discussion_r843584406 + run_once: yes + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vnname }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + +- name: Add subnet + azure_rm_subnet: + name: "{{ subnetname }}" + virtual_network_name: "{{ vnname }}" + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/24" + +- name: Create instance of Database Account -- check mode + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + location: eastasia + geo_rep_locations: + - name: eastasia + failover_priority: 0 + database_account_offer_type: Standard + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + location: eastasia + kind: global_document_db + geo_rep_locations: + - name: eastasia + failover_priority: 0 + - name: westus + failover_priority: 1 + database_account_offer_type: Standard + is_virtual_network_filter_enabled: yes + virtual_network_rules: + - subnet: + resource_group: "{{ resource_group }}" + virtual_network_name: "{{ vnname }}" + subnet_name: "{{ subnetname }}" + ignore_missing_v_net_service_endpoint: yes + register: output +- name: Assert the resource instance is well created + 
assert: + that: + - output.changed + +- name: Create again instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + location: eastasia + kind: global_document_db + geo_rep_locations: + - name: eastasia + failover_priority: 0 + - name: westus + failover_priority: 1 + database_account_offer_type: Standard + is_virtual_network_filter_enabled: yes + virtual_network_rules: + - subnet: + resource_group: "{{ resource_group }}" + virtual_network_name: "{{ vnname }}" + subnet_name: "{{ subnetname }}" + ignore_missing_v_net_service_endpoint: yes + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Create again instance of Database Account -- change something + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + location: eastasia + kind: global_document_db + geo_rep_locations: + - name: eastasia + failover_priority: 0 + - name: westus + failover_priority: 1 + database_account_offer_type: Standard + is_virtual_network_filter_enabled: yes + virtual_network_rules: + - subnet: + resource_group: "{{ resource_group }}" + virtual_network_name: "{{ vnname }}" + subnet_name: "{{ subnetname }}" + ignore_missing_v_net_service_endpoint: yes + enable_automatic_failover: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Create second instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group_secondary }}" + name: "{{ db2name }}" + location: eastasia + kind: global_document_db + geo_rep_locations: + - name: eastasia + failover_priority: 0 + - name: westus + failover_priority: 1 + database_account_offer_type: Standard + is_virtual_network_filter_enabled: yes + virtual_network_rules: + - subnet: + resource_group: "{{ resource_group }}" + virtual_network_name: "{{ vnname }}" + subnet_name: "{{ subnetname }}" + 
ignore_missing_v_net_service_endpoint: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Get facts of single account + azure_rm_cosmosdbaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.accounts | length == 1 + - output.accounts[0]['id'] != None + - output.accounts[0]['resource_group'] == resource_group + - output.accounts[0]['name'] == dbname + - output.accounts[0]['location'] == 'eastasia' + - output.accounts[0]['kind'] != None + - output.accounts[0]['consistency_policy'] != None + - output.accounts[0]['failover_policies'] != None + - output.accounts[0]['read_locations'] != None + - output.accounts[0]['write_locations'] != None + - output.accounts[0]['database_account_offer_type'] != None + - output.accounts[0]['ip_range_filter'] != None + - output.accounts[0]['ip_rules'] != None + - output.accounts[0]['is_virtual_network_filter_enabled'] != None + - output.accounts[0]['enable_automatic_failover'] != None + - output.accounts[0]['enable_cassandra'] != None + - output.accounts[0]['enable_table'] != None + - output.accounts[0]['enable_gremlin'] != None + - output.accounts[0]['virtual_network_rules'] != None + - output.accounts[0]['enable_multiple_write_locations'] != None + - output.accounts[0]['document_endpoint'] != None + - output.accounts[0]['provisioning_state'] != None + - output.accounts[0]['tags'] != None + - output.accounts[0]['enable_free_tier'] == false + - output.accounts[0]['public_network_access'] == 'Enabled' + +- name: Get facts with keys + azure_rm_cosmosdbaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + retrieve_keys: all + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.accounts[0]['primary_master_key'] != None + - 
output.accounts[0]['secondary_master_key'] != None + - output.accounts[0]['primary_readonly_master_key'] != None + - output.accounts[0]['secondary_readonly_master_key'] != None + +- name: Get facts with readonly keys + azure_rm_cosmosdbaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + retrieve_keys: readonly + retrieve_connection_strings: yes + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - "'primary_master_key' not in output.accounts[0]" + - "'secondary_master_key' not in output.accounts[0]" + - output.accounts[0]['primary_readonly_master_key'] != None + - output.accounts[0]['secondary_readonly_master_key'] != None + - output.accounts[0]['connection_strings'] | length > 0 + +- name: List accounts by resource group + azure_rm_cosmosdbaccount_info: + resource_group: "{{ resource_group }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.accounts | length == 1 + - output.accounts[0]['id'] != None + - output.accounts[0]['resource_group'] == resource_group + - output.accounts[0]['name'] == dbname + - output.accounts[0]['location'] == 'eastasia' + - output.accounts[0]['kind'] != None + - output.accounts[0]['consistency_policy'] != None + - output.accounts[0]['failover_policies'] != None + - output.accounts[0]['read_locations'] != None + - output.accounts[0]['write_locations'] != None + - output.accounts[0]['database_account_offer_type'] != None + - output.accounts[0]['ip_range_filter'] != None + - output.accounts[0]['ip_rules'] != None + - output.accounts[0]['is_virtual_network_filter_enabled'] != None + - output.accounts[0]['enable_automatic_failover'] != None + - output.accounts[0]['enable_cassandra'] != None + - output.accounts[0]['enable_table'] != None + - output.accounts[0]['enable_gremlin'] != None + - output.accounts[0]['virtual_network_rules'] != None + - 
output.accounts[0]['enable_multiple_write_locations'] != None + - output.accounts[0]['document_endpoint'] != None + - output.accounts[0]['provisioning_state'] != None + - output.accounts[0]['tags'] != None + - output.accounts[0]['enable_free_tier'] == false + - output.accounts[0]['public_network_access'] == 'Enabled' + +- name: List all accounts + azure_rm_cosmosdbaccount_info: + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.accounts | length >= 2 + - dbname in (output.accounts | map(attribute='name')) + - db2name in (output.accounts | map(attribute='name')) + +- name: Create 4.0 Mongo free-tier instance with public networking disabled + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}-free4" + location: eastasia + kind: mongo_db + mongo_version: "4.0" + enable_free_tier: "{{ free_tier_supported }}" + public_network_access: "Disabled" + ip_rules: + - "1.1.1.1" + - "2.2.2.2/28" + geo_rep_locations: + - name: eastasia + failover_priority: 0 + - name: westus + failover_priority: 1 + database_account_offer_type: Standard + is_virtual_network_filter_enabled: yes + virtual_network_rules: + - subnet: + resource_group: "{{ resource_group }}" + virtual_network_name: "{{ vnname }}" + subnet_name: "{{ subnetname }}" + ignore_missing_v_net_service_endpoint: yes + enable_automatic_failover: yes + register: output +- name: Assert resource created + assert: + that: + - output.changed + +- name: Get facts for free tier account + azure_rm_cosmosdbaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}-free4" + register: output +- name: Assert that facts are returned + assert: + that: + - output.accounts[0]['mongo_version'] == '4.0' + - output.accounts[0]['enable_free_tier'] == free_tier_supported + - output.accounts[0]['public_network_access'] == 'Disabled' + - output.accounts[0]['ip_range_filter'] == '1.1.1.1,2.2.2.2/28' + - 
(output.accounts[0]['ip_rules'] | length) == 2 + - output.accounts[0]['ip_rules'][0] == '1.1.1.1' + - output.accounts[0]['ip_rules'][1] == '2.2.2.2/28' + +- name: Delete instance of Database Account -- check mode + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}" + state: absent + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Delete second instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group_secondary }}" + name: "{{ db2name }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete free tier instance of Database Account + azure_rm_cosmosdbaccount: + resource_group: "{{ resource_group }}" + name: "{{ dbname }}-free4" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Clean up virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vnname }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/aliases new file mode 100644 index 000000000..5bec11dd5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/aliases @@ 
-0,0 +1,3 @@ +cloud/azure +shippable/azure/group11 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/tasks/main.yml new file mode 100644 index 000000000..25afbe8bb --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datafactory/tasks/main.yml @@ -0,0 +1,76 @@ +- name: Create data factory name + set_fact: + name: "df{{ resource_group | hash('md5') | truncate(21, True, '') }}" + +- name: Create data factory (Checkmode) + azure_rm_datafactory: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + public_network_access: Enabled + tags: + key1: value1 + check_mode: True + +- name: Create data factory + azure_rm_datafactory: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + public_network_access: Enabled + tags: + key1: value1 + register: output + +- assert: + that: + - output.changed + +- name: Create data factory again (Idempotent test) + azure_rm_datafactory: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + public_network_access: Enabled + tags: + key1: value1 + register: output + +- assert: + that: + - not output.changed + +- name: Update data factory + azure_rm_datafactory: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + public_network_access: Disabled + tags: + key1: value1 + key2: value2 + register: output + +- assert: + that: + - output.changed + +- name: Get data factory info + 
azure_rm_datafactory_info: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + register: output + +- name: Assert status succeed and result match expectations + assert: + that: + - output.datafactory[0].tags | length == 2 + - output.datafactory[0].public_network_access == 'Disabled' + + +- name: Delete data factory + azure_rm_datafactory: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + state: absent + register: output + +- assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml new file mode 100644 index 000000000..1b9e2409b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml @@ -0,0 +1,203 @@ +- name: Create data lake store name + set_fact: + adl_name: "adl{{ resource_group_datalake | hash('md5') | truncate(21, True, '') }}" + vnet_name: "vnet{{ resource_group_datalake | hash('md5') | truncate(20, True, '') }}" + 
+- name: Create virtual network + azure_rm_virtualnetwork: + name: "{{ vnet_name }}" + resource_group: "{{ resource_group_datalake }}" + address_prefixes_cidr: + - 10.1.0.0/16 + register: vnet_output + +- name: Create subnet + azure_rm_subnet: + name: foobar + virtual_network_name: "{{ vnet_name }}" + resource_group: "{{ resource_group_datalake }}" + address_prefix_cidr: "10.1.1.0/24" + service_endpoints: + - service: Microsoft.AzureActiveDirectory + register: subnet_output + +- name: Create minimal data lake store + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + register: output + +- name: Assert status succeeded and results + assert: + that: + - output.changed + - output.state.id is defined + - output.state.account_id is defined + - output.state.creation_time is defined + - output.state.current_tier == "Consumption" + - output.state.encryption_state == "Enabled" + - output.state.endpoint == "{{ adl_name }}.azuredatalakestore.net" + - output.state.firewall_allow_azure_ips == "Disabled" + - output.state.firewall_rules | length == 0 + - output.state.firewall_state == "Disabled" + - output.state.last_modified_time is defined + - output.state.new_tier == "Consumption" + - output.state.provisioning_state == "Succeeded" + - output.state.trusted_id_provider_state == "Disabled" + +- name: Create minimal data lake store (Idempotence) + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + register: output + +- name: Assert that status has not changed + assert: + that: + - not output.changed + +- name: Update data lake store to add virtual_network_rules + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + virtual_network_rules: + - name: vnet_rule_1 + subnet_id: "{{ subnet_output.state.id }}" + register: output + +- name: Assert status succeeded and results include virtual_network_rules + assert: + that: + - output.changed + 
- output.state.virtual_network_rules | length == 1 + - output.state.virtual_network_rules[0].name == "vnet_rule_1" + - output.state.virtual_network_rules[0].subnet_id == "{{ subnet_output.state.id }}" + +- name: Update data lake store to change encryption state that must fail + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + encryption_state: Disabled + register: output + ignore_errors: yes + +- name: Assert that encryption state cannot change + assert: + that: + - not output.changed + - output.msg == 'Encryption type cannot be updated.' + +- name: Update data lake store to add new_tier + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + new_tier: Commitment_1TB + register: output + +- name: Assert status succeeded and results include virtual_network_rules + assert: + that: + - output.changed + - output.state.current_tier == "Consumption" + - output.state.new_tier == "Commitment_1TB" + +- name: Delete minimal data lake store + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + state: absent + register: output + +- name: Create new data lake store + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + tags: + P1: V1 + P2: V4 + P3: V3 + new_tier: Commitment_1TB + default_group: default_group_test + encryption_state: Enabled + firewall_state: Enabled + firewall_allow_azure_ips: Enabled + firewall_rules: + - + name: test_rule_1 + start_ip_address: 192.168.1.1 + end_ip_address: 192.168.1.254 + - + name: test_rule_2 + start_ip_address: 10.0.0.1 + end_ip_address: 10.1.0.1 + virtual_network_rules: + - name: vnet_rule_1 + subnet_id: "{{ subnet_output.state.id }}" + register: output + +- name: Assert status succeeded and results include an Id value + assert: + that: + - output.changed + - output.state.id is defined + - output.state.account_id is defined + - 
output.state.creation_time is defined + - output.state.current_tier == "Commitment_1TB" + - output.state.default_group == "default_group_test" + - output.state.encryption_state == "Enabled" + - output.state.endpoint == "{{ adl_name }}.azuredatalakestore.net" + - output.state.firewall_allow_azure_ips == "Enabled" + - output.state.firewall_rules | length == 2 + - output.state.firewall_state == "Enabled" + - output.state.last_modified_time is defined + - output.state.new_tier == "Commitment_1TB" + - output.state.provisioning_state == "Succeeded" + - output.state.tags | length == 3 + - output.state.trusted_id_provider_state == "Disabled" + - output.state.virtual_network_rules | length == 1 + - output.state.virtual_network_rules[0].name == "vnet_rule_1" + - output.state.virtual_network_rules[0].subnet_id == "{{ subnet_output.state.id }}" + +- name: Create new data lake store (Idempotence) + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + tags: + P1: V1 + P2: V4 + P3: V3 + new_tier: Commitment_1TB + default_group: default_group_test + encryption_state: Enabled + firewall_state: Enabled + firewall_allow_azure_ips: Enabled + firewall_rules: + - + name: test_rule_1 + start_ip_address: 192.168.1.1 + end_ip_address: 192.168.1.254 + - + name: test_rule_2 + start_ip_address: 10.0.0.1 + end_ip_address: 10.1.0.1 + virtual_network_rules: + - name: vnet_rule_1 + subnet_id: "{{ subnet_output.state.id }}" + register: output + +- name: Assert that status has not changed + assert: + that: + - not output.changed + +- name: Delete virtual network + azure_rm_virtualnetwork: + name: "{{ vnet_name }}" + resource_group: "{{ resource_group_datalake }}" + state: absent + +- name: Delete Data Lake Store + azure_rm_datalakestore: + resource_group: "{{ resource_group_datalake }}" + name: "{{ adl_name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/aliases new file mode 100644 index 000000000..5cf25760d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/meta/main.yml new file mode 100644 index 000000000..cf34ae763 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/tasks/main.yml new file mode 100644 index 000000000..a61003680 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ddosprotectionplan/tasks/main.yml @@ -0,0 +1,82 @@ +- name: Create random ddos protection plan + set_fact: + ddosprotectionplan: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create DDoS protection plan (check mode) + azure_rm_ddosprotectionplan: + location: eastus2 + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + check_mode: yes + register: results + +- assert: + that: results.changed + +- name: Create DDoS protection plan + azure_rm_ddosprotectionplan: + location: eastus2 + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: results.changed + +- name: Update DDoS protection plan + azure_rm_ddosprotectionplan: + 
location: eastus2 + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + tags: + test: modified + register: results + +- assert: + that: + - results.changed + - results.state.tags.test == 'modified' + +- name: Retrieve DDoS protection plan + azure_rm_ddosprotectionplan_info: + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + register: results + +- name: Assert that facts module returned result + assert: + that: + - results.ddosprotectionplan[0].tags.test == 'modified' + +- name: Test idempotent + azure_rm_ddosprotectionplan: + location: eastus2 + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: + - not results.changed + +# +# azure_rm_ddosprotectionplan cleanup +# + +- name: Delete DDoS protection plan + azure_rm_ddosprotectionplan: + location: eastus2 + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete DDoS protection plan + azure_rm_ddosprotectionplan: + location: eastus2 + name: "{{ ddosprotectionplan }}" + resource_group: "{{ resource_group }}" + state: absent + register: results + +- assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/aliases new file mode 100644 index 000000000..2cc249376 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/aliases @@ -0,0 +1,4 @@ +cloud/azure +destructive +disabled +shippable/azure/group1 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/tasks/main.yml new file mode 100644 index 000000000..7e83a4c28 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_deployment/tasks/main.yml @@ -0,0 +1,70 @@ +- name: Create random dns label + set_fact: + dns_label: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Error Create Azure Deploy + azure_rm_deployment: + resource_group: "{{ resource_group }}" + location: "eastus" + template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/d01a5c06f4f1bc03a049ca17bbbd6e06d62657b3/101-vm-simple-linux/azuredeploy.json' + deployment_name: "{{ dns_label }}" + parameters: + adminUsername: + value: chouseknecht + adminPassword: + value: password123! + dnsLabelPrefix: + value: "{{ dns_label }}" + ubuntuOSVersion: + value: "Never-LTS" + register: output + ignore_errors: yes + +- name: Create Azure Deploy + azure_rm_deployment: + resource_group: "{{ resource_group }}" + location: "eastus" + template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/d01a5c06f4f1bc03a049ca17bbbd6e06d62657b3/101-vm-simple-linux/azuredeploy.json' + deployment_name: "{{ dns_label }}" + parameters: + adminUsername: + value: chouseknecht + adminPassword: + value: password123! + dnsLabelPrefix: + value: "{{ dns_label }}" + ubuntuOSVersion: + value: "16.04.0-LTS" + register: output + +- name: Add new instance to host group + add_host: + hostname: "{{ item.vm_name }}" + ansible_host: "{{ item['ips'][0].public_ip }}" + ansible_user: chouseknecht + ansible_ssh_pass: password123! 
+ groupname: azure_vms + with_items: "{{ output.deployment.instances }}" + +- name: Get Deployment Facts for Resource Group + azure_rm_deployment_info: + resource_group: "{{ resource_group }}" + register: output +- debug: + var: output + +- name: Get Deployment Facts for named deployment + azure_rm_deployment_info: + resource_group: "{{ resource_group }}" + name: "{{ dns_label }}" + register: output +- debug: + var: output + +- name: Assert that values are returned + assert: + that: + - not output.changed + - output.deployments[0]['provisioning_state'] != None + - output.deployments[0]['output_resources'] | length > 0 + - output.deployments[0]['outputs'] | length > 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/aliases new file mode 100644 index 000000000..4353a9ee7 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/aliases @@ -0,0 +1,17 @@ +cloud/azure +destructive +shippable/azure/group15 +azure_rm_devtestlab +azure_rm_devtestlab_info +azure_rm_devtestlabarmtemplate_info +azure_rm_devtestlabcustomimage +azure_rm_devtestlabcustomimage_info +azure_rm_devtestlabpolicy +azure_rm_devtestlabpolicy_info +azure_rm_devtestlabschedule +azure_rm_devtestlabschedule_info +azure_rm_devtestlabvirtualmachine +azure_rm_devtestlabvirtualmachine_info +azure_rm_devtestlabvirtualnetwork +azure_rm_devtestlabvirtualnetwork_info +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/tasks/main.yml new file mode 100644 index 000000000..87d63ad88 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_devtestlab/tasks/main.yml @@ -0,0 +1,218 @@ +- name: Set devtest lab value + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(20, True, '') }}" + +- name: Create a DevTest Lab (check mode) + azure_rm_devtestlab: + resource_group: "{{ resource_group }}" + name: "dev-{{ rpfx }}" + storage_type: standard + premium_data_disks: true + tags: + key1: value1 + check_mode: yes + +- name: Create a DevTest Lab + azure_rm_devtestlab: + resource_group: "{{ resource_group }}" + name: "dev-{{ rpfx }}" + storage_type: standard + premium_data_disks: true + tags: + key1: value1 + register: output + +- name: Check the devtest lab changed + assert: + that: output.changed + +- name: Create a DevTest Lab (Idempotent test) + azure_rm_devtestlab: + resource_group: "{{ resource_group }}" + name: "dev-{{ rpfx }}" + storage_type: standard + premium_data_disks: true + tags: + key1: value1 + register: output + +- name: Check the devtest lab not changed + assert: + that: not output.changed + +- name: Get devtest lab facts + azure_rm_devtestlab_info: + resource_group: "{{ resource_group }}" + name: "dev-{{ rpfx }}" + register: output + +- name: Assert the devtest lab create success + assert: + that: + - output.labs | length == 1 + - output.labs[0].provisioning_state == "Succeeded" + +- name: Create a devtest lab virtual network + azure_rm_devtestlabvirtualnetwork: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: "vnet-{{ rpfx }}" + description: "My Lab Virtual network" + tags: + key1: value1 + register: output + +- name: Assert the devtest lab virtual network create successfully + assert: + that: + -
output.changed + +- name: Get the devtest lab virtual network + azure_rm_devtestlabvirtualnetwork_info: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: "vnet-{{ rpfx }}" + register: output + +- name: Assert the devtest lab virtual network facts + assert: + that: + - output.virtualnetworks | length == 1 + - output.virtualnetworks[0].description == "My Lab Virtual network" + - output.virtualnetworks[0].provisioning_state == "Succeeded" + +- name: Create a dev test lab Virtual machine + azure_rm_devtestlabvirtualmachine: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: "vm-{{ rpfx }}" + notes: "Notes of Virtual machine" + os_type: linux + vm_size: Standard_A2_v2 + user_name: azureuser + password: Password@0329 + lab_subnet: + name: "vnet-{{ rpfx }}Subnet" + virtual_network_name: "vnet-{{ rpfx }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + os_type: Linux + version: latest + allow_claim: no + expiration_date: "2025-10-22T01:49:12.117974Z" + register: output + +- name: Assert the devtest lab virtual machine create successfully + assert: + that: + - output.changed + +- name: Get devtest lab info + azure_rm_devtestlabvirtualmachine_info: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: "vm-{{ rpfx }}" + register: output + +- name: Assert the devtest lab virtualmachine facts + assert: + that: + - output.virtualmachines | length == 1 + - output.virtualmachines[0].provisioning_state == "Succeeded" + - output.virtualmachines[0].expiration_date == "2025-10-22T01:49:12.117974Z" + +- name: Create an instance devtest lab image + azure_rm_devtestlabcustomimage: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: "image-{{ rpfx }}" + source_vm: "vm-{{ rpfx }}" + linux_os_state: non_deprovisioned + register: output + +- name: Assert the devtest lab image created successfully + assert: + that: + - output.changed + +- name: Get custom image +
azure_rm_devtestlabcustomimage_info: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: "image-{{ rpfx }}" + register: output + +- name: Assert the devtest lab image facts + assert: + that: + - output.custom_images | length == 1 + - output.custom_images[0].lab_name + - output.custom_images[0].id + +- name: Get devtest lab armtemplate facts + azure_rm_devtestlabarmtemplate_info: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + artifact_source_name: "public environment repo" + register: output + +- name: Assert the devtest lab armtemplate is returned successfully + assert: + that: + - output.armtemplates | length >= 1 + +- name: Create a Devtest lab policy + azure_rm_devtestlabpolicy: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + policy_set_name: default + name: "policy-{{ rpfx }}" + fact_name: user_owned_lab_vm_count + threshold: 5 + register: output + +- name: Assert the devtest lab policy created successfully + assert: + that: + - output.changed + +- name: Get the devtest lab policy facts + azure_rm_devtestlabpolicy_info: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + policy_set_name: default + register: output + +- name: Assert the devtest lab policy facts + assert: + that: + - output.policies | length >= 1 + +- name: Create a Devtest lab schedule + azure_rm_devtestlabschedule: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: lab_vms_shutdown + time: "1030" + time_zone_id: "UTC+12" + register: output + +- name: Assert the devtest lab schedule created successfully + assert: + that: + - output.changed + +- name: Get devtest lab schedule + azure_rm_devtestlabschedule_info: + resource_group: "{{ resource_group }}" + lab_name: "dev-{{ rpfx }}" + name: lab_vms_shutdown + register: output + +- name: Assert the devtest lab schedule facts + assert: + that: + - output.schedules | length >= 1 + - output.schedules[0].name == "lab_vms_shutdown" diff
--git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/lookup_plugins/azure_service_principal_attribute.py b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/lookup_plugins/azure_service_principal_attribute.py new file mode 100644 index 000000000..1b7d0318f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/lookup_plugins/azure_service_principal_attribute.py @@ -0,0 +1,94 @@ +# (c) 2018 Yunge Zhu, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +lookup: azure_service_principal_attribute + +requirements: + - azure-graphrbac + +author: + - Yunge Zhu + +version_added: "2.7" + +short_description: Look up Azure service principal attributes. + +description: + - Describes object id of your Azure service principal account. +options: + azure_client_id: + description: azure service principal client id. 
+ azure_secret: + description: azure service principal secret + azure_tenant: + description: azure tenant + azure_cloud_environment: + description: azure cloud environment +""" + +EXAMPLES = """ +set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=azure_secret) }}" +""" + +RETURN = """ +_raw: + description: + Returns object id of service principal. +""" + +from ansible.errors import AnsibleError +from ansible.plugins import AnsiblePlugin +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_native + +try: + from azure.common.credentials import ServicePrincipalCredentials + from azure.graphrbac import GraphRbacManagementClient + from msrestazure import azure_cloud + from msrestazure.azure_exceptions import CloudError +except ImportError: + raise AnsibleError( + "The lookup azure_service_principal_attribute requires azure.graphrbac, msrest") + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + + self.set_options(direct=kwargs) + + credentials = {} + credentials['azure_client_id'] = self.get_option('azure_client_id', None) + credentials['azure_secret'] = self.get_option('azure_secret', None) + credentials['azure_tenant'] = self.get_option('azure_tenant', 'common') + + if credentials['azure_client_id'] is None or credentials['azure_secret'] is None: + raise AnsibleError("Must specify azure_client_id and azure_secret") + + _cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD + if self.get_option('azure_cloud_environment', None) is not None: + cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(credentials['azure_cloud_environment']) + + try: + azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'], + secret=credentials['azure_secret'], + tenant=credentials['azure_tenant'], + resource=_cloud_environment.endpoints.active_directory_graph_resource_id) + + client = 
GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'], + base_url=_cloud_environment.endpoints.active_directory_graph_resource_id) + + response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id']))) + sp = response[0] + + return sp.object_id.split(',') + except CloudError as ex: + raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex)) + return False diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/tasks/main.yml new file mode 100644 index 000000000..79055e213 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_diskencryptionset/tasks/main.yml @@ -0,0 +1,138 @@ +- name: Prepare random number + set_fact: + set_name: "des{{ resource_group | hash('md5') | truncate(22, True, '') }}" + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + tenant_id: "{{ azure_tenant }}" + run_once: yes + +- name: lookup service principal object id + set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=tenant_id) }}" + register: object_id_facts + +- name: create a key vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "myvault{{ rpfx }}" + enabled_for_disk_encryption: yes + vault_tenant: "{{ 
tenant_id }}" + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: "{{ object_id }}" + keys: + - get + - list + - wrapkey + - unwrapkey + - create + - update + - import + - delete + - backup + - restore + - recover + - purge + +- name: Create a key in key vault + azure_rm_keyvaultkey: + key_name: testkey + keyvault_uri: https://myvault{{ rpfx }}.vault.azure.net + +- name: Get latest version of key + azure_rm_keyvaultkey_info: + vault_uri: https://myvault{{ rpfx }}.vault.azure.net + name: testkey + register: results + +- set_fact: + key_url: "{{ results['keys'][0]['kid'] }}" + +- name: create disk encryption set + azure_rm_diskencryptionset: + resource_group: "{{ resource_group }}" + name: "{{ set_name }}" + source_vault: "myvault{{ rpfx }}" + key_url: "{{ key_url }}" + state: present + register: results + +- name: Assert that disk encryption set is created + assert: + that: results.changed + +- name: create disk encryption set (Idempotent test) + azure_rm_diskencryptionset: + resource_group: "{{ resource_group }}" + name: "{{ set_name }}" + source_vault: "myvault{{ rpfx }}" + key_url: "{{ key_url }}" + state: present + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Update disk encryption set + azure_rm_diskencryptionset: + resource_group: "{{ resource_group }}" + name: "{{ set_name }}" + source_vault: "myvault{{ rpfx }}" + key_url: "{{ key_url }}" + state: present + tags: + key1: "value1" + register: results + +- name: Assert that disk encryption set is updated + assert: + that: results.changed + +- name: Get disk encryption set facts + azure_rm_diskencryptionset_info: + resource_group: "{{ resource_group }}" + name: "{{ set_name }}" + register: results + +- assert: + that: + - not results.changed + - results.diskencryptionsets[0].id != None + - results.diskencryptionsets[0].name == "{{ set_name }}" + - results.diskencryptionsets[0].active_key != None + 
- results.diskencryptionsets[0].provisioning_state == "Succeeded" + - results.diskencryptionsets[0].tags | length > 0 + +- name: Delete disk encryption set + azure_rm_diskencryptionset: + resource_group: "{{ resource_group }}" + name: "{{ set_name }}" + state: absent + register: results + +- name: Assert that disk encryption set is deleted + assert: + that: results.changed + +- name: Delete disk encryption set (Idempotent test) + azure_rm_diskencryptionset: + resource_group: "{{ resource_group }}" + name: "{{ set_name }}" + state: absent + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Delete the Key Vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "myvault{{ rpfx }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/aliases new file mode 100644 index 000000000..8f7a9a2e5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group1 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/tasks/main.yml new file mode 100644 index 000000000..6b9acee8d --- /dev/null +++
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnsrecordset/tasks/main.yml @@ -0,0 +1,207 @@ +- name: Create random domain name + set_fact: + domain_name: "{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create a DNS zone + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: present + register: results + +- name: Assert that DNS zone was created + assert: + that: results.changed + +- name: create "A" record set with multiple records + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that A record set was created + assert: + that: results.changed + +- name: re-run "A" record with same values + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that A record set was not changed + assert: + that: not results.changed + +- name: Update "A" record set with additional record + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + record_mode: append + records: + - entry: 192.168.100.104 + register: results + +- name: Assert that new record was appended + assert: + that: + - results.changed + +- name: re-update "A" record set with additional record + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + record_mode: append + records: + - entry: 192.168.100.104 + register: results + +- name: Assert that A record set was not changed + assert: + 
that: + - not results.changed + +- name: Remove 1 record from record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that record was deleted + assert: + that: + - results.changed + +- name: Check_mode test + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.105 + check_mode: yes + register: results + +- name: Assert that check_mode returns new state + assert: + that: + - results.changed + +# FUTURE: add facts module calls to ensure that we really didn't touch anything + +- name: delete a record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + state: absent + register: results + +- name: Assert that record set deleted + assert: + that: results.changed + +- name: (idempotence test) re-run record set absent + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + state: absent + register: results + +- name: + assert: + that: not results.changed + +- name: create SRV records in a new record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "_sip._tcp.{{ domain_name }}.com" + zone_name: "{{ domain_name }}.com" + time_to_live: 7200 + record_type: SRV + state: present + records: + - entry: sip.{{ domain_name }}.com + priority: 20 + weight: 10 + port: 5060 + register: results + +- name: Assert that SRV record set was created + assert: + that: + - results.changed + +- name: create TXT records in a new record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "_txt.{{ domain_name 
}}.com" + zone_name: "{{ domain_name }}.com" + record_type: TXT + state: present + records: + - entry: "v=spf1 a -all" + - entry: "foo" + - entry: + - "bar" + - "baz" + register: results + +- name: Assert that TXT record set was created + assert: + that: + - results.changed + +- name: Update SOA record + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "@" + zone_name: "{{ domain_name }}.com" + record_type: SOA + state: present + records: + - host: ns1-99.example.com. + email: azuredns-hostmaster99.example.com + serial_number: 99 + refresh_time: 3699 + retry_time: 399 + expire_time: 2419299 + minimum_ttl: 399 + register: results + +- name: Assert that SOA record set was created + assert: + that: + - results.changed + +- name: Delete DNS zone + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/aliases new file mode 100644 index 000000000..b048b01fe --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/aliases @@ -0,0 +1,6 @@ +cloud/azure +shippable/azure/group2 +destructive +azure_rm_dnszone_info +azure_rm_dnsrecordset +azure_rm_dnsrecordset_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/tasks/main.yml 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/tasks/main.yml new file mode 100644 index 000000000..3f38ca554 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_dnszone/tasks/main.yml @@ -0,0 +1,300 @@ +- name: Create random domain name + set_fact: + domain_name: "{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create a DNS zone (check mode) + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + register: results + check_mode: yes + +- assert: + that: results.changed + +- name: Create a DNS zone + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + register: results + +- assert: + that: results.changed + +- name: Update DNS zone with tags + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + tags: + test: modified + register: results + +- assert: + that: + - results.changed + - results.state.tags.test == 'modified' + +- name: Retrieve DNS Zone Facts + azure_rm_dnszone_info: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + register: zones + +- name: Assert that facts module returned result + assert: + that: + - zones.dnszones[0].tags.test == 'modified' + - zones.dnszones[0].type == 'public' + +- name: Test idempotent + azure_rm_dnszone: + name: "{{ domain_name }}.com" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: + - not results.changed + +# +# azure_rm_dnsrecordset test +# + +- name: create "A" record set with multiple records + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that A record set was created + assert: + that: + - 
results.changed + - 'results.state.a_records | length == 3' + +- name: re-run "A" record with same values + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that A record set was not changed + assert: + that: not results.changed + +- name: Update "A" record set with additional record + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + record_mode: append + records: + - entry: 192.168.100.104 + register: results + +- name: Assert that new record was appended + assert: + that: + - results.changed + - 'results.state.a_records | length == 4' + +- name: re-update "A" record set with additional record + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + record_mode: append + records: + - entry: 192.168.100.104 + register: results + +- name: Assert that A record set was not changed + assert: + that: + - not results.changed + +- name: Remove 1 record from record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that record was deleted + assert: + that: + - results.changed + - 'results.state.a_records | length == 3' + +- name: Check_mode test + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.105 + check_mode: yes + register: results + +- name: Assert that check_mode returns new state + assert: + that: + - results.changed + +# FUTURE: add 
facts module calls to ensure that we really didn't touch anything + +- name: create SRV records in a new record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "_sip._tcp.{{ domain_name }}.com" + zone_name: "{{ domain_name }}.com" + time_to_live: 7200 + record_type: SRV + records: + - entry: sip.{{ domain_name }}.com + priority: 20 + weight: 10 + port: 5060 + register: results + +- name: Assert that SRV record set was created + assert: + that: + - results.changed + +- name: create TXT records in a new record set + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "_txt.{{ domain_name }}.com" + zone_name: "{{ domain_name }}.com" + record_type: TXT + records: + - entry: "v=spf1 a -all" + - entry: "foo" + - entry: + - "bar" + - "baz" + register: results + +- name: Assert that TXT record set was created + assert: + that: + - results.changed + +# +# azure_rm_dnsrecordset_info +# + +- name: Retrieve DNS Record Set Facts for single Record Set + azure_rm_dnsrecordset_info: + resource_group: "{{ resource_group }}" + zone_name: "{{ domain_name }}.com" + relative_name: www + record_type: A + register: results + +- name: Assert that facts module returned result for single Record Set + assert: + that: + - not results.changed +# - azure_dnsrecordset[0].name == 'www' + - results.dnsrecordsets[0].relative_name == 'www' + - 'results.dnsrecordsets[0].records | length == 3' + - results.dnsrecordsets[0].record_type == 'A' + +- name: Retrieve DNS Record Set Facts for all Record Sets + azure_rm_dnsrecordset_info: + resource_group: "{{ resource_group }}" + zone_name: "{{ domain_name }}.com" + register: facts + +- name: Assert that facts module returned result for all Record Sets + assert: + that: + - not facts.changed +# - facts.ansible_facts.azure_dnsrecordset[0].name == '@' +# - facts.ansible_facts.azure_dnsrecordset[1].name == '@' +# - facts.ansible_facts.azure_dnsrecordset[4].name == 'www' + +# +# 
azure_rm_dnsrecordset cleanup +# +- name: delete all record sets except for @ + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "{{ item.relative_name }}" + zone_name: "{{ domain_name }}.com" + record_type: "{{ item.record_type }}" + state: absent + with_items: "{{ facts.dnsrecordsets }}" + when: + - item.relative_name != '@' + register: results + +- name: Assert that record set deleted + assert: + that: results.changed + +- name: Retrieve DNS Record Set Facts for all Record Sets + azure_rm_dnsrecordset_info: + resource_group: "{{ resource_group }}" + zone_name: "{{ domain_name }}.com" + register: facts + +- name: Assert all record set deleted + assert: + that: + - item.relative_name == '@' + with_items: "{{ facts.dnsrecordsets }}" + +- name: (idempotence test) re-run record set absent + azure_rm_dnsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + state: absent + register: results + +- name: Assert record set deletion is idempotent + assert: + that: not results.changed + +# +# azure_rm_dnszone cleanup +# +- name: Delete DNS zone + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent + +- name: Delete DNS zone (idempotent) + azure_rm_dnszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent + register: results + +- assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/aliases new file mode 100644 index 000000000..5cf25760d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/meta/main.yml
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml new file mode 100644 index 000000000..484f0146d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml @@ -0,0 +1,145 @@ +- name: Create random event hub and namespace + set_fact: + namespace_name: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + name: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create Event Hub Namespace (check mode) + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + sku: "Basic" + check_mode: yes + register: results + +- assert: + that: results.changed + +- name: Create Event Hub (check mode) + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + sku: "Basic" + check_mode: yes + register: results + +- assert: + that: results.changed + +- name: Create Event Hub Namespace + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + sku: "Basic" + register: results + +- assert: + that: results.changed + +- name: Create Event Hub + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: results.changed + +- name: Update 
Namespace + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + sku: "Standard" + tags: + test: modified + register: results + +- assert: + that: + - results.changed + - results.state.tags.test == 'modified' + +- name: Update Event Hub + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + message_retention_in_days: 4 + state: present + tags: + test: modified + register: results + +- assert: + that: + - results.changed + +- name: Retrieve Namespace + azure_rm_eventhub_info: + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + register: results + +- name: Test idempotent + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + sku: "Standard" + tags: + test: modified + register: results + +- assert: + that: + - not results.changed + +#cleanup +- name: Delete Event Hub + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete Event Hub (idempotent) + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + register: results + +- assert: + that: not results.changed + +- name: Delete Namespace + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Pause for 1 minutes to check namespace deleted + pause: + minutes: 1 + +- name: Delete Namespace (idempotent) + azure_rm_eventhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + state: absent + register: results + +- assert: + that: not results.changed diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml new file mode 100644 index 000000000..d90dbfdf6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml @@ -0,0 +1,119 @@ +- name: Create random express route + set_fact: + express_route: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create Express route (check mode) + azure_rm_expressroute: + location: eastus + name: "{{ express_route }}" + resource_group: "{{ resource_group }}" + allow_classic_operations: true + global_reach_enabled: false + tags: + a: b + authorizations: + - name: authorization_test + service_provider_properties: + service_provider_name: Aryaka Networks + peering_location: Seattle + bandwidth_in_mbps: '200' + sku: + tier: premium + family: metereddata + register: results + check_mode: yes + +- assert: + that: results.changed + + +- name: Create Express route + 
azure_rm_expressroute: + location: eastus + name: "{{ express_route }}" + resource_group: "{{ resource_group }}" + allow_classic_operations: true + global_reach_enabled: false + tags: + a: b + authorizations: + - name: authorization_test + service_provider_properties: + service_provider_name: Aryaka Networks + peering_location: Seattle + bandwidth_in_mbps: '200' + sku: + tier: premium + family: metereddata + register: results + +- assert: + that: results.changed + + +- name: Update Express route + azure_rm_expressroute: + location: eastus + name: "{{ express_route }}" + resource_group: "{{ resource_group }}" + allow_classic_operations: true + global_reach_enabled: false + tags: + test: modified + authorizations: + - name: authorization_test + service_provider_properties: + service_provider_name: Aryaka Networks + peering_location: Seattle + bandwidth_in_mbps: '200' + sku: + tier: premium + family: metereddata + register: results + +- assert: + that: + - results.changed + - results.state.tags.test == 'modified' + + +- name: Retrieve Express Route + azure_rm_expressroute_info: + resource_group: "{{ resource_group }}" + name: "{{ express_route }}" + register: results + +- name: Assert that facts module returned result + assert: + that: + - results.expressroute[0].tags.test == 'modified' + - results.expressroute[0].type == 'Microsoft.Network/expressRouteCircuits' + +- name: Test idempotent + azure_rm_expressroute: + name: "{{ express_route }}" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: + - not results.changed + +# +# azure_rm_expressroute cleanup +# +- name: Delete Express Route + azure_rm_expressroute: + resource_group: "{{ resource_group }}" + name: "{{ express_route }}" + state: absent + +- name: Delete Express Route (idempotent) + azure_rm_expressroute: + resource_group: "{{ resource_group }}" + name: "{{ express_route }}" + state: absent + register: results + +- assert: + that: not results.changed diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/tasks/main.yml new file mode 100644 index 000000000..4bbba792e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_firewallpolicy/tasks/main.yml @@ -0,0 +1,100 @@ +- name: Create firewall policy name + set_fact: + policy_name: "fp{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create firewall policy + azure_rm_firewallpolicy: + resource_group: "{{ resource_group }}" + name: "{{ policy_name }}" + threat_intel_mode: alert + threat_intel_whitelist: + ip_addresses: + - 10.0.0.1 + - 10.0.0.2 + fqdns: + - "*.microsoft.com" + - "*.azure.com" + state: present + register: results + +- name: Assert that firewall policy is created + assert: + that: results.changed + +- name: Create a firewall policy again (Idempotent test) + azure_rm_firewallpolicy: + resource_group: "{{ resource_group }}" + name: "{{ policy_name }}" + threat_intel_mode: alert + 
threat_intel_whitelist: + ip_addresses: + - 10.0.0.1 + - 10.0.0.2 + fqdns: + - "*.microsoft.com" + - "*.azure.com" + state: present + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Update a firewall policy + azure_rm_firewallpolicy: + resource_group: "{{ resource_group }}" + name: "{{ policy_name }}" + threat_intel_mode: deny + threat_intel_whitelist: + ip_addresses: + - 10.0.0.1 + append_ip_addresses: false + fqdns: + - "*.microsoft.com" + append_fqdns: false + state: present + tags: + key1: "value1" + register: results + +- name: Assert that firewall policy is updated + assert: + that: results.changed + +- name: Get firewall policy facts + azure_rm_firewallpolicy_info: + resource_group: "{{ resource_group }}" + name: "{{ policy_name }}" + register: results + +- assert: + that: + - not results.changed + - results.firewallpolicies[0].id != None + - results.firewallpolicies[0].name == "{{ policy_name }}" + - results.firewallpolicies[0].threat_intel_mode == "Deny" + - results.firewallpolicies[0].threat_intel_whitelist.ip_addresses == ["10.0.0.1"] + - results.firewallpolicies[0].threat_intel_whitelist.fqdns == ["*.microsoft.com"] + - results.firewallpolicies[0].tags | length > 0 + +- name: Delete the firewall policy + azure_rm_firewallpolicy: + resource_group: "{{ resource_group }}" + name: "{{ policy_name }}" + state: absent + register: results + +- name: Assert that firewall policy is deleted + assert: + that: results.changed + +- name: Delete firewall policy again (Idempotent test) + azure_rm_firewallpolicy: + resource_group: "{{ resource_group }}" + name: "{{ policy_name }}" + state: absent + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/aliases new file
mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/tasks/main.yml new file mode 100644 index 000000000..e618e7038 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_functionapp/tasks/main.yml @@ -0,0 +1,131 @@ +- name: Fix resource prefix + set_fact: + fixed_resource_prefix: "fa{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Fix resource prefix + set_fact: + funcapp_name_basic: "fa{{ fixed_resource_prefix }}basic" + funcapp_name_container: "fa{{ fixed_resource_prefix }}container" + funcapp_name_params: "fa{{ fixed_resource_prefix }}params" + storage_account_name: "sa{{ fixed_resource_prefix }}" + plan_name: "ap{{ fixed_resource_prefix }}" + +- name: create storage account for function apps + azure_rm_storageaccount: + resource_group: '{{ resource_group }}' + name: "{{ storage_account_name }}" + account_type: Standard_LRS + +- name: create basic function app + azure_rm_functionapp: + resource_group: "{{ resource_group }}" + name: "{{ funcapp_name_basic }}" + storage_account: "{{ storage_account_name }}" + register: output + +- name: assert the function was created + assert: + that: output.changed + 
+- name: list facts for function + azure_rm_functionapp_info: + resource_group: '{{ resource_group }}' + name: "{{ funcapp_name_basic }}" + register: results + +- name: assert the facts were retrieved + assert: + that: + - results.ansible_info.azure_functionapps|length == 1 + - results.ansible_info.azure_functionapps[0].name == "{{ funcapp_name_basic }}" + +- name: delete basic function app + azure_rm_functionapp: + resource_group: '{{ resource_group }}' + name: "{{ funcapp_name_basic }}" + state: absent + register: output + +- name: assert the function was deleted + assert: + that: output.changed + +- name: create a function with app settings + azure_rm_functionapp: + resource_group: '{{ resource_group }}' + name: "{{ funcapp_name_params }}" + storage_account: "{{ storage_account_name }}" + app_settings: + hello: world + things: more stuff + FUNCTIONS_EXTENSION_VERSION: "~2" + register: output + +- name: assert the function with app settings was created + assert: + that: output.changed + +- name: change app settings + azure_rm_functionapp: + resource_group: '{{ resource_group }}' + name: "{{ funcapp_name_params }}" + storage_account: "{{ storage_account_name }}" + app_settings: + hello: world + things: more stuff + FUNCTIONS_EXTENSION_VERSION: "~2" + another: one + register: output + +- name: assert the function was changed + assert: + that: output.changed + +- name: delete the function app + azure_rm_functionapp: + resource_group: '{{ resource_group }}' + name: "{{ funcapp_name_params }}" + state: absent + register: output + +- name: assert the function was deleted + assert: + that: output.changed + +- name: Create a linux app service plan + azure_rm_appserviceplan: + resource_group: "{{ resource_group }}" + name: "{{ plan_name }}" + sku: S1 + is_linux: true + number_of_workers: 1 + +- name: "Create azure function app {{ function_app }}" + azure_rm_functionapp: + resource_group: "{{ resource_group }}" + name: "{{ funcapp_name_container }}" + storage_account: "{{ 
storage_account_name }}" + plan: + resource_group: "{{ resource_group }}" + name: "{{ plan_name }}" + container_settings: + name: httpd + app_settings: + FUNCTIONS_EXTENSION_VERSION: "~2" + register: output + +- name: assert the function was changed + assert: + that: output.changed + +- name: delete the function app + azure_rm_functionapp: + resource_group: '{{ resource_group }}' + name: "{{ funcapp_name_container }}" + state: absent + +- name: delete storage account + azure_rm_storageaccount: + resource_group: '{{ resource_group }}' + name: "{{ storage_account_name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/aliases new file mode 100644 index 000000000..df49fa1ef --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/aliases @@ -0,0 +1,6 @@ +cloud/azure +shippable/azure/group4 +destructive +azure_rm_galleryimage +azure_rm_galleryimageversion +azure_rm_snapshot diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/tasks/main.yml new file mode 100644 index 000000000..f67fa218f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_gallery/tasks/main.yml @@ -0,0 +1,370 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, 
'') }}{{ 1000 | random }}" + run_once: yes + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "testVnet{{ rpfx }}" + address_prefixes: "10.0.0.0/16" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "testSubnet{{ rpfx }}" + address_prefix: "10.0.1.0/24" + virtual_network: "testVnet{{ rpfx }}" + +- name: Create public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: "testPublicIP{{ rpfx }}" + +- name: Create virtual network inteface cards for VM A and B + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}nic" + virtual_network: "testVnet{{ rpfx }}" + subnet: "testSubnet{{ rpfx }}" + +- name: Create VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + location: eastus + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + network_interfaces: "vmforimage{{ rpfx }}nic" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + +- name: Get VM facts + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + register: output + +- name: Pause for 10 mimutes to VM updating + shell: sleep 600 + +- name: Create a snapshot by importing an unmanaged blob from the same subscription. 
+ azure_rm_snapshot: + resource_group: "{{ resource_group }}" + name: "mySnapshot-{{ rpfx }}" + location: eastus + creation_data: + create_option: Import + source_uri: 'https://{{ output.vms[0].storage_account_name }}.blob.core.windows.net/{{ output.vms[0].storage_container_name }}/{{ output.vms[0].storage_blob_name }}' + register: output + +- assert: + that: + - output.changed + +- name: Create a managed disk + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "disk-{{ rpfx }}" + disk_size_gb: 1 + register: disk_output + +- name: Create a snapshot with I(incremental=True) + azure_rm_snapshot: + resource_group: "{{ resource_group }}" + name: "mySnapshot-{{ rpfx }}02" + location: eastus + incremental: True + creation_data: + create_option: Copy + source_id: "{{ disk_output.state.id }}" + register: output + +- assert: + that: + - output.changed + +- name: Generalize VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + generalized: yes +- name: Create custom image + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimagea + source: "vmforimage{{ rpfx }}" +- name: Create or update a simple gallery. + azure_rm_gallery: + resource_group: "{{ resource_group }}" + name: myGallery{{ rpfx }} + location: eastus + description: This is the gallery description. + register: output + +- assert: + that: + - output.changed + +- name: Create or update a simple gallery - idempotent + azure_rm_gallery: + resource_group: "{{ resource_group }}" + name: myGallery{{ rpfx }} + location: eastus + description: This is the gallery description. + register: output + +- assert: + that: + - not output.changed + +- name: Create or update a simple gallery - change description + azure_rm_gallery: + resource_group: "{{ resource_group }}" + name: myGallery{{ rpfx }} + location: eastus + description: This is the gallery description - xxx. 
+ register: output + +- assert: + that: + - output.changed + +- name: Get a gallery info. + azure_rm_gallery_info: + resource_group: "{{ resource_group }}" + name: myGallery{{ rpfx }} + register: output + +- assert: + that: + - not output.changed + - output.galleries['id'] != None + - output.galleries['name'] != None + - output.galleries['location'] != None + - output.galleries['description'] != None + - output.galleries['provisioning_state'] != None + +- name: Create or update gallery image + azure_rm_galleryimage: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + name: myImage + location: eastus + os_type: linux + os_state: generalized + identifier: + publisher: myPublisherName + offer: myOfferName + sku: mySkuName + description: Image Description + register: output + +- assert: + that: + - output.changed + +- name: Create or update gallery image - idempotent + azure_rm_galleryimage: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + name: myImage + location: eastus + os_type: linux + os_state: generalized + identifier: + publisher: myPublisherName + offer: myOfferName + sku: mySkuName + description: Image Description + register: output + +- assert: + that: + - not output.changed + +- name: Create or update gallery image - change description + azure_rm_galleryimage: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + name: myImage + location: eastus + os_type: linux + os_state: generalized + identifier: + publisher: myPublisherName + offer: myOfferName + sku: mySkuName + description: Image Description XXXs + register: output + +- assert: + that: + - output.changed + +- name: Get a gallery image info. 
+ azure_rm_galleryimage_info: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + name: myImage + register: output + +- assert: + that: + - not output.changed + - output.images['id'] != None + - output.images['name'] != None + - output.images['location'] != None + - output.images['os_state'] != None + - output.images['os_type'] != None + - output.images['identifier'] != None + +- name: Create or update a simple gallery Image Version. + azure_rm_galleryimageversion: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + gallery_image_name: myImage + name: 10.1.3 + location: eastus + publishing_profile: + end_of_life_date: "2050-10-01t00:00:00+00:00" + exclude_from_latest: yes + replica_count: 3 + storage_account_type: Standard_LRS + target_regions: + - name: eastus + regional_replica_count: 1 + - name: westus + regional_replica_count: 2 + storage_account_type: Standard_ZRS + managed_image: + name: testimagea + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.changed + +- name: Create or update a simple gallery Image Version - idempotent + azure_rm_galleryimageversion: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + gallery_image_name: myImage + name: 10.1.3 + location: eastus + publishing_profile: + end_of_life_date: "2050-10-01t00:00:00+00:00" + exclude_from_latest: yes + replica_count: 3 + storage_account_type: Standard_LRS + target_regions: + - name: eastus + regional_replica_count: 1 + - name: westus + regional_replica_count: 2 + storage_account_type: Standard_ZRS + managed_image: + name: testimagea + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - not output.changed + +- name: Create or update a simple gallery Image Version - change end of life + azure_rm_galleryimageversion: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + gallery_image_name: myImage + name: 10.1.3 + 
location: eastus + publishing_profile: + end_of_life_date: "2051-10-01t00:00:00+00:00" + exclude_from_latest: yes + replica_count: 3 + storage_account_type: Standard_LRS + target_regions: + - name: eastus + regional_replica_count: 1 + - name: westus + regional_replica_count: 2 + storage_account_type: Standard_ZRS + managed_image: + name: testimagea + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.changed + +- name: Get a simple gallery Image Version info. + azure_rm_galleryimageversion_info: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + gallery_image_name: myImage + name: 10.1.3 + register: output + +- assert: + that: + - not output.changed + - output.versions['id'] != None + - output.versions['name'] != None + - output.versions['location'] != None + - output.versions['publishing_profile'] != None + - output.versions['provisioning_state'] != None + +- name: Delete gallery image Version. + azure_rm_galleryimageversion: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + gallery_image_name: myImage + name: 10.1.3 + state: absent + register: output + +- assert: + that: + - output.changed + +- name: pause 2 minutes, wait for deletion complete + pause: + minutes: 2 + +- name: Delete gallery image + azure_rm_galleryimage: + resource_group: "{{ resource_group }}" + gallery_name: myGallery{{ rpfx }} + name: myImage + state: absent + register: output + +- assert: + that: + - output.changed + +- name: pause 2 minutes, wait for deletion complete + pause: + minutes: 2 + +- name: Delete gallery + azure_rm_gallery: + resource_group: "{{ resource_group }}" + name: myGallery{{ rpfx }} + state: absent + register: output + +- assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/aliases new file
mode 100644 index 000000000..b61052b30 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/aliases @@ -0,0 +1,6 @@ +cloud/azure +destructive +shippable/azure/group14 +unstable # test is slow (~30 minute run time), not unstable, but this is better than unsupported +azure_rm_hdinsightcluster_info +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml new file mode 100644 index 000000000..edb99c4c1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml @@ -0,0 +1,244 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "storage{{ rpfx }}" + account_type: Standard_LRS + location: eastus2 + +- name: Sample for Azure REST API - StorageAccounts_ListKeys + azure_rm_resource: + api_version: '2018-07-01' + method: POST + resource_group: "{{ resource_group }}" + provider: storage + resource_type: storageaccounts + resource_name: "storage{{ rpfx }}" + subresource: + - type: listkeys + register: storage_output + +- debug: + var: storage_output + +- name: Create instance of Cluster -- check mode + azure_rm_hdinsightcluster: + 
resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + location: eastus2 + cluster_version: 3.6 + os_type: linux + tier: standard + cluster_definition: + kind: spark + gateway_rest_username: http-user + gateway_rest_password: Password123! + storage_accounts: + - name: storage{{ rpfx }}.blob.core.windows.net + is_default: yes + container: "cluster{{ rpfx }}" + key: "{{ storage_output['response']['keys'][0]['value'] }}" + compute_profile_roles: + - name: headnode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: workernode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: zookeepernode + target_instance_count: 3 + vm_size: Medium + linux_profile: + username: sshuser + password: Password123! + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Cluster + azure_rm_hdinsightcluster: + resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + location: eastus2 + cluster_version: 3.6 + os_type: linux + tier: standard + cluster_definition: + kind: spark + gateway_rest_username: http-user + gateway_rest_password: Password123! + storage_accounts: + - name: storage{{ rpfx }}.blob.core.windows.net + is_default: yes + container: "cluster{{ rpfx }}" + key: "{{ storage_output['response']['keys'][0]['value'] }}" + compute_profile_roles: + - name: headnode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: workernode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: zookeepernode + target_instance_count: 3 + vm_size: Medium + linux_profile: + username: sshuser + password: Password123! 
+ register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create again instance of Cluster -- idempotent + azure_rm_hdinsightcluster: + resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + location: eastus2 + cluster_version: 3.6 + os_type: linux + tier: standard + cluster_definition: + kind: spark + gateway_rest_username: http-user + gateway_rest_password: Password123! + storage_accounts: + - name: storage{{ rpfx }}.blob.core.windows.net + is_default: yes + container: "cluster{{ rpfx }}" + key: "{{ storage_output['response']['keys'][0]['value'] }}" + compute_profile_roles: + - name: headnode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: workernode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: zookeepernode + target_instance_count: 3 + vm_size: Medium + linux_profile: + username: sshuser + password: Password123! + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Create again instance of Cluster -- resize and add tags + azure_rm_hdinsightcluster: + resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + location: eastus2 + cluster_version: 3.6 + os_type: linux + tier: standard + cluster_definition: + kind: spark + gateway_rest_username: http-user + gateway_rest_password: Password123! + storage_accounts: + - name: storage{{ rpfx }}.blob.core.windows.net + is_default: yes + container: "cluster{{ rpfx }}" + key: "{{ storage_output['response']['keys'][0]['value'] }}" + compute_profile_roles: + - name: headnode + target_instance_count: 1 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! + - name: workernode + target_instance_count: 2 + vm_size: Standard_D3 + linux_profile: + username: sshuser + password: Password123! 
+ - name: zookeepernode + target_instance_count: 3 + vm_size: Medium + linux_profile: + username: sshuser + password: Password123! + tags: + aaa: bbb + register: output +- debug: + var: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Get facts of Cluster + azure_rm_hdinsightcluster_info: + resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.clusters[0]['id'] != None + - output.clusters[0]['resource_group'] != None + - output.clusters[0]['name'] != None + - output.clusters[0]['location'] != None + - output.clusters[0]['cluster_version'] != None + - output.clusters[0]['os_type'] != None + - output.clusters[0]['tier'] != None + - output.clusters[0]['cluster_definition'] != None + - output.clusters[0]['compute_profile_roles'] != None + - output.clusters[0]['connectivity_endpoints'] != None + +- name: Delete instance of Cluster -- check mode + azure_rm_hdinsightcluster: + resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of Cluster + azure_rm_hdinsightcluster: + resource_group: "{{ resource_group }}" + name: "cluster{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/tasks/main.yml new file mode 100644 index 000000000..8b2d408eb --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_hostgroup/tasks/main.yml @@ -0,0 +1,87 @@ +- name: Create host group name + set_fact: + group_name: "hostgroup{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create a host group + azure_rm_hostgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + zones: + - "1" + platform_fault_domain_count: 1 + state: present + register: results + +- name: Assert that host group is created + assert: + that: results.changed + +- name: Create a host group again (Idempotent test) + azure_rm_hostgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + zones: + - "1" + platform_fault_domain_count: 1 + state: present + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Update a host group + azure_rm_hostgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + zones: + - "1" + platform_fault_domain_count: 1 + state: present + tags: + key1: "value1" + register: results + +- name: Assert that host group is updated + assert: + that: results.changed + +- name: Get host group facts + azure_rm_hostgroup_info: + resource_group: "{{ 
resource_group }}" + name: "{{ group_name }}" + register: results + +- assert: + that: + - not results.changed + - results.hostgroups[0].name == "{{ group_name }}" + - results.hostgroups[0].location == "eastus" + - results.hostgroups[0].platform_fault_domain_count == 1 + - results.hostgroups[0].zones == ["1"] + - results.hostgroups[0].tags | length > 0 + +- name: Delete host group + azure_rm_hostgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + state: absent + register: results + +- name: Assert that host group is deleted + assert: + that: results.changed + +- name: Delete host group again (Idempotent test) + azure_rm_hostgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + state: absent + register: results + +- name: Asset that output is not changed + assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/aliases new file mode 100644 index 000000000..2d7dea2ce --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group3 +destructive +azure_rm_image_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml new file mode 100644 index 000000000..cbe9baa31 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml @@ -0,0 +1,178 @@ +- name: Create storage account name + set_fact: + vm_name: "vm{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}x" + public_ip_name: "pip{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + security_group_name: "sg{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + empty_disk_name: "emptydisk{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + address_prefixes: "10.10.0.0/16" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + address_prefix: "10.10.0.0/24" + virtual_network: "{{ vm_name }}" + +- name: Create public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: "{{ public_ip_name }}" + +- name: Create security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ security_group_name }}" + +- name: Create NIC + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + virtual_network: "{{ vm_name }}" + subnet: "{{ vm_name }}" + public_ip_name: "{{ public_ip_name }}" + security_group: "{{ security_group_name }}" + +- name: Create virtual machine + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_B1ms + managed_disk_type: Standard_LRS + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + os_type: Linux + network_interfaces: "{{ vm_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm + +- name: Create new empty managed disk + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "{{ empty_disk_name }}" + storage_account_type: "Standard_LRS" + disk_size_gb: 1 + register: emptydisk + +- name: Create an image from VM (check mode) + azure_rm_image: + resource_group: "{{ resource_group }}" + source: "{{ vm.ansible_facts.azure_vm.properties.storageProfile.osDisk.managedDisk.id }}" + name: testimage001 + os_type: Linux + hyper_v_generation: V1 + data_disk_sources: + - "{{ empty_disk_name }}" + check_mode: yes + register: output + +- assert: + that: output.changed + +- name: Create an image from VM + azure_rm_image: + resource_group: "{{ resource_group }}" + source: + name: "{{ vm_name }}" + type: disks + name: testimage001 + hyper_v_generation: V1 + os_type: Linux + register: output + +- assert: + that: + - output.changed + - output.id + +- name: Create an image from VM (idempotent) + azure_rm_image: + resource_group: "{{ resource_group }}" + source: "{{ vm.ansible_facts.azure_vm.properties.storageProfile.osDisk.managedDisk.id }}" + name: testimage001 + hyper_v_generation: V1 + os_type: Linux + register: output + +- assert: + that: + - not output.changed + - output.id + +- name: Gather information about image created + azure_rm_image_info: + resource_group: "{{ resource_group }}" + name: testimage001 + register: output + +- assert: + that: + - output.images != [] + - 
output.images[0].hyper_v_generation == 'V1' + +- name: Delete image (check mode) + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimage001 + state: absent + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Delete image + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimage001 + state: absent + register: output + +- assert: + that: + - output.changed + +- name: Delete image (idempotent) + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimage001 + state: absent + register: output + +- assert: + that: + - not output.changed + +- name: Delete empty disk + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "{{ empty_disk_name }}" + state: absent + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + vm_size: Standard_A0 + register: output + +- name: Delete public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: "{{ public_ip_name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/tasks/main.yml new file mode 100644 index 000000000..6cb319050 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_iothub/tasks/main.yml @@ -0,0 +1,178 @@ +- set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}" + +- name: Create IoT Hub (check mode) + azure_rm_iothub: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + ip_filters: + - name: filter1 + action: reject + ip_mask: 40.60.80.10 + check_mode: yes + register: iothub + +- assert: + that: + - iothub.changed + +- name: Query IoT Hub + azure_rm_iothub_info: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: iothub + ignore_errors: yes + +- name: Create IoT Hub + azure_rm_iothub: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + ip_filters: + - name: filter1 + action: reject + ip_mask: 40.60.80.10 + register: iothub + +- assert: + that: + - iothub.changed + +- name: Create IoT Hub (idempontent) + azure_rm_iothub: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + ip_filters: + - name: filter1 + action: reject + ip_mask: 40.60.80.10 + register: iothub + +- assert: + that: + - not iothub.changed + +- name: Query IoT Hub + azure_rm_iothub_info: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + list_keys: yes + register: iothub + +- assert: + that: + - iothub.iothubs | length == 1 + +- set_fact: + registry_write_name: "{{ item.key_name }}" + registry_write_key: "{{ item.primary_key }}" + with_items: "{{ iothub.iothubs[0]['keys'] }}" + when: item.rights == 'RegistryWrite, ServiceConnect, DeviceConnect' + +- name: Create devices + azure_rm_iotdevice: + hub: "hub{{ rpfx }}" + hub_policy_name: "{{ registry_write_name }}" + hub_policy_key: "{{ registry_write_key }}" + name: "mydevice{{ 
item }}" + twin_tags: + location: + country: US + city: Redmond + sensor: humidity + with_items: + - 1 + - 2 + +- name: Query devices + azure_rm_iotdevice_info: + hub: "hub{{ rpfx }}" + hub_policy_name: "{{ registry_write_name }}" + hub_policy_key: "{{ registry_write_key }}" + register: devices + +- assert: + that: + - devices.iot_devices | length == 2 + +- name: Query devices + azure_rm_iotdevice_info: + hub: "hub{{ rpfx }}" + name: "mydevice1" + hub_policy_name: "{{ registry_write_name }}" + hub_policy_key: "{{ registry_write_key }}" + register: devices + +- assert: + that: + - devices.iot_devices | length == 1 + - devices.iot_devices[0].deviceId == 'mydevice1' + +- name: Pause for 5 mimutes + shell: sleep 300 + +- name: Query devices twin + azure_rm_iotdevice_info: + hub: "hub{{ rpfx }}" + query: "SELECT * FROM devices WHERE tags.location.country = 'US'" + hub_policy_name: "{{ registry_write_name }}" + hub_policy_key: "{{ registry_write_key }}" + register: devices + +- assert: + that: + - devices.iot_devices | length == 2 + +- name: Update devices + azure_rm_iotdevice: + hub: "hub{{ rpfx }}" + hub_policy_name: "{{ registry_write_name }}" + hub_policy_key: "{{ registry_write_key }}" + name: "mydevice{{ item }}" + edge_enabled: yes + twin_tags: + location: + country: China + city: Shanghai + sensor: humidity + with_items: + - 1 + - 3 + +- name: Pause for 5 mimutes + shell: sleep 300 + +- name: Query devices twin + azure_rm_iotdevice_info: + hub: "hub{{ rpfx }}" + query: "SELECT * FROM devices WHERE tags.location.country = 'US'" + hub_policy_name: "{{ registry_write_name }}" + hub_policy_key: "{{ registry_write_key }}" + register: devices + +- assert: + that: + - devices.iot_devices | length == 1 + - devices.iot_devices[0].deviceId == 'mydevice2' + +- name: Delete IoT Hub (check mode) + azure_rm_iothub: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + register: iothub + +- assert: + that: + - iothub.changed + +- 
name: Delete IoT Hub + azure_rm_iothub: + name: "hub{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + register: iothub + +- assert: + that: + - iothub.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/tasks/main.yml new file mode 100644 index 000000000..8656c7110 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_ipgroup/tasks/main.yml @@ -0,0 +1,106 @@ +- name: Create IP group name + set_fact: + group_name: "ipgroup{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create IP group (check mode) + azure_rm_ipgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + ip_addresses: + - 13.64.39.16/32 + - 40.74.146.80/31 + - 40.74.147.32/28 + tags: + key1: "value1" + state: present + check_mode: yes + +- name: Create IP group + azure_rm_ipgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + ip_addresses: + - 13.64.39.16/32 + - 
40.74.146.80/31 + - 40.74.147.32/28 + tags: + key1: "value1" + state: present + register: results + +- name: Assert that IP group is created + assert: + that: results.changed + +- name: Create same IP group again (Idempotent test) + azure_rm_ipgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + ip_addresses: + - 13.64.39.16/32 + - 40.74.146.80/31 + - 40.74.147.32/28 + tags: + key1: "value1" + state: present + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Update IP group + azure_rm_ipgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + location: eastus + ip_addresses: + - 10.0.0.0/24 + tags: + key2: "value2" + register: results + +- name: Assert that IP group is updated + assert: + that: results.changed + +- name: Get IP group facts + azure_rm_ipgroup_info: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + register: results + +- assert: + that: + - not results.changed + - results.ipgroups[0].id != None + - results.ipgroups[0].name == "{{ group_name }}" + - results.ipgroups[0].location == "eastus" + - results.ipgroups[0].provisioning_state == "Succeeded" + - results.ipgroups[0].ip_addresses == ["10.0.0.0/24"] + - results.ipgroups[0].tags | length > 0 + +- name: Delete IP group + azure_rm_ipgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + state: absent + register: results + +- name: Assert that IP group is deleted + assert: + that: results.changed + +- name: Delete IP group again (Idempotent test) + azure_rm_ipgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + state: absent + register: results + +- name: Asset that output is not changed + assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/aliases new file mode 100644 index 000000000..c256751e5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/aliases @@ -0,0 +1,5 @@ +cloud/azure +destructive +shippable/azure/group9 +azure_rm_keyvaultkey +azure_rm_keyvaultsecret diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py new file mode 100644 index 000000000..1b7d0318f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py @@ -0,0 +1,94 @@ +# (c) 2018 Yunge Zhu, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +lookup: azure_service_principal_attribute + +requirements: + - azure-graphrbac + +author: + - Yunge Zhu + +version_added: "2.7" + +short_description: Look up Azure service principal attributes. + +description: + - Describes object id of your Azure service principal account. +options: + azure_client_id: + description: azure service principal client id. + azure_secret: + description: azure service principal secret + azure_tenant: + description: azure tenant + azure_cloud_environment: + description: azure cloud environment +""" + +EXAMPLES = """ +set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=azure_secret) }}" +""" + +RETURN = """ +_raw: + description: + Returns object id of service principal. 
+""" + +from ansible.errors import AnsibleError +from ansible.plugins import AnsiblePlugin +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_native + +try: + from azure.common.credentials import ServicePrincipalCredentials + from azure.graphrbac import GraphRbacManagementClient + from msrestazure import azure_cloud + from msrestazure.azure_exceptions import CloudError +except ImportError: + raise AnsibleError( + "The lookup azure_service_principal_attribute requires azure.graphrbac, msrest") + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + + self.set_options(direct=kwargs) + + credentials = {} + credentials['azure_client_id'] = self.get_option('azure_client_id', None) + credentials['azure_secret'] = self.get_option('azure_secret', None) + credentials['azure_tenant'] = self.get_option('azure_tenant', 'common') + + if credentials['azure_client_id'] is None or credentials['azure_secret'] is None: + raise AnsibleError("Must specify azure_client_id and azure_secret") + + _cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD + if self.get_option('azure_cloud_environment', None) is not None: + cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(credentials['azure_cloud_environment']) + + try: + azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'], + secret=credentials['azure_secret'], + tenant=credentials['azure_tenant'], + resource=_cloud_environment.endpoints.active_directory_graph_resource_id) + + client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'], + base_url=_cloud_environment.endpoints.active_directory_graph_resource_id) + + response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id']))) + sp = response[0] + + return sp.object_id.split(',') + except CloudError as ex: + raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex)) + return False 
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/tasks/main.yml new file mode 100644 index 000000000..2cd690fbc --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvault/tasks/main.yml @@ -0,0 +1,277 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + tenant_id: "{{ azure_tenant }}" + run_once: yes + +- name: lookup service principal object id + set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=tenant_id) }}" + register: object_id_facts + +- name: Create instance of Key Vault -- check mode + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + enabled_for_deployment: yes + vault_tenant: "{{ tenant_id }}" + soft_delete_retention_in_days: 7 + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: "{{ object_id }}" + keys: + - get + - list + - update + - create + - import + - delete + - recover + - backup + - restore + secrets: + - get + - list + - set + - delete + - recover + - backup + - restore + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Key Vault + azure_rm_keyvault: + resource_group: 
"{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + enabled_for_deployment: yes + vault_tenant: "{{ tenant_id }}" + soft_delete_retention_in_days: 7 + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: "{{ object_id }}" + secrets: + - get + - list + - set + - delete + - recover + - backup + - restore + register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Key Vault again + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + enabled_for_deployment: yes + vault_tenant: "{{ tenant_id }}" + soft_delete_retention_in_days: 7 + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: "{{ object_id }}" + secrets: + - get + - list + - set + - delete + - recover + - backup + - restore + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Update existing Key Vault (add a rule and tags) + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + enabled_for_deployment: yes + vault_tenant: "{{ tenant_id }}" + soft_delete_retention_in_days: 7 + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: "{{ object_id }}" + keys: + - get + - list + - update + - create + - import + - delete + - recover + - backup + - restore + secrets: + - get + - list + - set + - delete + - recover + - backup + - restore + tags: + aaa: bbb + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == true + +- name: Get key vault facts + azure_rm_keyvault_info: + resource_group: "{{ resource_group }}" + name: "vault{{ rpfx }}" + register: facts + +- name: Assert the facts are properly set + assert: + that: + - facts['keyvaults'] | length == 1 + - facts['keyvaults'][0]['vault_uri'] != None + - 
facts['keyvaults'][0]['name'] != None + - facts['keyvaults'][0]['access_policies'] != None + - facts['keyvaults'][0]['sku'] != None + - facts['keyvaults'][0]['id'] != None + - facts['keyvaults'][0]['enable_soft_delete'] == true + - facts['keyvaults'][0]['soft_delete_retention_in_days'] == 7 +# +# azure_rm_keyvaultkey tests +# + +- name: create a keyvault key + block: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + key_name: testkey + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkey + +- name: Get key current version + azure_rm_keyvaultkey_info: + vault_uri: https://vault{{ rpfx }}.vault.azure.net + name: testkey + register: facts + +- name: Assert key facts + assert: + that: + - facts['keys'] | length == 1 + - facts['keys'][0]['kid'] + - facts['keys'][0]['permitted_operations'] | length > 0 + - facts['keys'][0]['type'] + - facts['keys'][0]['version'] + +- name: delete a kevyault key + azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkey + register: output + +- assert: + that: output.changed + +# +# azure_rm_keyvaultsecret tests +# +- name: create a keyvault secret + block: + - azure_rm_keyvaultsecret: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + secret_name: testsecret + secret_value: 'mysecret' + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultsecret: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + secret_name: testsecret + +- name: delete a keyvault secret + azure_rm_keyvaultsecret: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + secret_name: testsecret + register: output + +- assert: + that: output.changed + +# +# azure_rm_keyvault finalize & clean up +# + 
+- name: Delete instance of Key Vault -- check mode + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of Key Vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Key Vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/aliases new file mode 100644 index 000000000..8f7a9a2e5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group1 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/lookup_plugins/azure_service_principal_attribute.py b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/lookup_plugins/azure_service_principal_attribute.py new file mode 100644 index 000000000..1b7d0318f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/lookup_plugins/azure_service_principal_attribute.py @@ -0,0 +1,94 @@ +# (c) 2018 Yunge Zhu, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +lookup: 
azure_service_principal_attribute + +requirements: + - azure-graphrbac + +author: + - Yunge Zhu + +version_added: "2.7" + +short_description: Look up Azure service principal attributes. + +description: + - Describes object id of your Azure service principal account. +options: + azure_client_id: + description: azure service principal client id. + azure_secret: + description: azure service principal secret + azure_tenant: + description: azure tenant + azure_cloud_environment: + description: azure cloud environment +""" + +EXAMPLES = """ +set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=azure_secret) }}" +""" + +RETURN = """ +_raw: + description: + Returns object id of service principal. +""" + +from ansible.errors import AnsibleError +from ansible.plugins import AnsiblePlugin +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_native + +try: + from azure.common.credentials import ServicePrincipalCredentials + from azure.graphrbac import GraphRbacManagementClient + from msrestazure import azure_cloud + from msrestazure.azure_exceptions import CloudError +except ImportError: + raise AnsibleError( + "The lookup azure_service_principal_attribute requires azure.graphrbac, msrest") + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + + self.set_options(direct=kwargs) + + credentials = {} + credentials['azure_client_id'] = self.get_option('azure_client_id', None) + credentials['azure_secret'] = self.get_option('azure_secret', None) + credentials['azure_tenant'] = self.get_option('azure_tenant', 'common') + + if credentials['azure_client_id'] is None or credentials['azure_secret'] is None: + raise AnsibleError("Must specify azure_client_id and azure_secret") + + _cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD + if self.get_option('azure_cloud_environment', None) is not None: + cloud_environment = 
azure_cloud.get_cloud_from_metadata_endpoint(credentials['azure_cloud_environment']) + + try: + azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'], + secret=credentials['azure_secret'], + tenant=credentials['azure_tenant'], + resource=_cloud_environment.endpoints.active_directory_graph_resource_id) + + client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'], + base_url=_cloud_environment.endpoints.active_directory_graph_resource_id) + + response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id']))) + sp = response[0] + + return sp.object_id.split(',') + except CloudError as ex: + raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex)) + return False diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/tasks/main.yml new file mode 100644 index 000000000..1d09f9e6d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultkey/tasks/main.yml @@ -0,0 +1,186 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + tenant_id: "{{ azure_tenant }}" + run_once: yes + +- name: lookup service principal object id + set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + 
azure_tenant=tenant_id) }}" + register: object_id_facts + +- name: Create instance of Key Vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + enabled_for_deployment: yes + vault_tenant: "{{ tenant_id }}" + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: '{{ object_id }}' + keys: + - get + - list + - update + - create + - import + - delete + - recover + - backup + - restore + - encrypt + - decrypt + - wrapkey + - unwrapkey + - sign + - verify + secrets: + - get + - list + - set + - delete + - recover + - backup + - restore + register: output + +- name: create a kevyault key + block: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + key_name: testkey + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkey + +- name: delete a kevyault key + azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkey + register: output + +- name: create a kevyault key of type EC + block: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + key_name: testkeyEC + key_type: EC + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkeyEC + +- name: delete a kevyault key of type EC + azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkeyEC + register: output + +- name: create a kevyault key of size 4096 + block: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + key_name: testkey4096 + key_size: 4096 + tags: + testing: test + delete: on-exit + register: 
output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkey4096 + +- name: delete a kevyault key of size 4096 + azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkey4096 + register: output + +- name: create a kevyault key with P-521 curve + block: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + key_name: testkeycurve + curve: P-521 + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkeycurve + +- name: delete a kevyault key with P-521 curve + azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkeycurve + register: output + +- name: create a kevyault key with attributes + block: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + key_name: testkeyattribute + key_attributes: + enabled: true + not_before: '2032-12-01T00:00:00Z' + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkeyattributes + +- name: delete a kevyault key with attributes + azure_rm_keyvaultkey: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + key_name: testkeyattribute + register: output + +- assert: + that: output.changed + +- name: Delete instance of Key Vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/aliases new file mode 100644 index 000000000..fd1a5ed5a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group1 +destructive \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/lookup_plugins/azure_service_principal_attribute.py b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/lookup_plugins/azure_service_principal_attribute.py new file mode 100644 index 000000000..1b7d0318f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/lookup_plugins/azure_service_principal_attribute.py @@ -0,0 +1,94 @@ +# (c) 2018 Yunge Zhu, +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +DOCUMENTATION = """ +lookup: azure_service_principal_attribute + +requirements: + - azure-graphrbac + +author: + - Yunge Zhu + +version_added: "2.7" + +short_description: Look up Azure service principal attributes. + +description: + - Describes object id of your Azure service principal account. +options: + azure_client_id: + description: azure service principal client id. + azure_secret: + description: azure service principal secret + azure_tenant: + description: azure tenant + azure_cloud_environment: + description: azure cloud environment +""" + +EXAMPLES = """ +set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=azure_secret) }}" +""" + +RETURN = """ +_raw: + description: + Returns object id of service principal. 
+""" + +from ansible.errors import AnsibleError +from ansible.plugins import AnsiblePlugin +from ansible.plugins.lookup import LookupBase +from ansible.module_utils._text import to_native + +try: + from azure.common.credentials import ServicePrincipalCredentials + from azure.graphrbac import GraphRbacManagementClient + from msrestazure import azure_cloud + from msrestazure.azure_exceptions import CloudError +except ImportError: + raise AnsibleError( + "The lookup azure_service_principal_attribute requires azure.graphrbac, msrest") + + +class LookupModule(LookupBase): + def run(self, terms, variables, **kwargs): + + self.set_options(direct=kwargs) + + credentials = {} + credentials['azure_client_id'] = self.get_option('azure_client_id', None) + credentials['azure_secret'] = self.get_option('azure_secret', None) + credentials['azure_tenant'] = self.get_option('azure_tenant', 'common') + + if credentials['azure_client_id'] is None or credentials['azure_secret'] is None: + raise AnsibleError("Must specify azure_client_id and azure_secret") + + _cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD + if self.get_option('azure_cloud_environment', None) is not None: + cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(credentials['azure_cloud_environment']) + + try: + azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'], + secret=credentials['azure_secret'], + tenant=credentials['azure_tenant'], + resource=_cloud_environment.endpoints.active_directory_graph_resource_id) + + client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'], + base_url=_cloud_environment.endpoints.active_directory_graph_resource_id) + + response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id']))) + sp = response[0] + + return sp.object_id.split(',') + except CloudError as ex: + raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex)) + return False 
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/tasks/main.yml new file mode 100644 index 000000000..0a4226a0e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_keyvaultsecret/tasks/main.yml @@ -0,0 +1,96 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + tenant_id: "{{ azure_tenant }}" + run_once: yes + +- name: lookup service principal object id + set_fact: + object_id: "{{ lookup('azure_service_principal_attribute', + azure_client_id=azure_client_id, + azure_secret=azure_secret, + azure_tenant=tenant_id) }}" + register: object_id_facts + +- name: Create instance of Key Vault + azure_rm_keyvault: + resource_group: "{{ resource_group }}" + vault_name: "vault{{ rpfx }}" + enabled_for_deployment: yes + vault_tenant: "{{ tenant_id }}" + sku: + name: standard + family: A + access_policies: + - tenant_id: "{{ tenant_id }}" + object_id: "{{ object_id }}" + keys: + - get + - list + - update + - create + - import + - delete + - recover + - backup + - restore + secrets: + - get + - list + - set + - delete + - recover + - backup + - restore + register: output + +- name: create a kevyault secret + block: + - azure_rm_keyvaultsecret: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + secret_name: testsecret + secret_value: 'mysecret' + content_type: 
'Content Type Secret' + secret_valid_from: 2000-01-02T010203Z + secret_expiry: 2030-03-04T040506Z + tags: + testing: test + delete: on-exit + register: output + - assert: + that: output.changed + rescue: + - azure_rm_keyvaultsecret: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + secret_name: testsecret + +- name: Get secret current version + azure_rm_keyvaultsecret_info: + vault_uri: https://vault{{ rpfx }}.vault.azure.net + name: testsecret + register: facts + +- name: Assert secret facts + assert: + that: + - facts['secrets'] | length == 1 + - facts['secrets'][0]['sid'] + - facts['secrets'][0]['secret'] + - facts['secrets'][0]['tags'] + - facts['secrets'][0]['version'] + - facts['secrets'][0]['attributes']['expires'] + - facts['secrets'][0]['attributes']['not_before'] + - facts['secrets'][0]['content_type'] == 'Content Type Secret' + - facts['secrets'][0]['attributes']['expires'] == "2030-03-04T04:05:06+00:00" + - facts['secrets'][0]['attributes']['not_before'] == "2000-01-02T01:02:03+00:00" + +- name: delete a kevyault secret + azure_rm_keyvaultsecret: + keyvault_uri: https://vault{{ rpfx }}.vault.azure.net + state: absent + secret_name: testsecret + register: output + +- assert: + that: output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/aliases new file mode 100644 index 000000000..cc941b59c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group12 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml new file mode 100644 index 000000000..2c1eb736e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml @@ -0,0 +1,333 @@ +- name: Prepare random number + set_fact: + pipaname: "pipa{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + pipbname: "pipb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + lbvnname: "lbvn{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + lbname_a: "lba{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + lbname_b: "lbb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + lbname_c1: "lbc1-{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + lbname_c2: "lbc2-{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + lbname_d: "lbd{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: create public ip + azure_rm_publicipaddress: + name: "{{ pipbname }}" + sku: Standard + allocation_method: Static + resource_group: '{{ resource_group }}' + +- name: create public ip + azure_rm_publicipaddress: + name: "{{ pipaname }}" + resource_group: '{{ resource_group }}' + +- name: clear load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_a }}" + state: absent + +- name: create load balancer -- check mode + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_a }}" + public_ip: "{{ pipaname }}" + check_mode: yes + 
register: output + +- name: assert load balancer created + assert: + that: output.changed + +- name: create load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_a }}" + public_ip: "{{ pipaname }}" + register: output + +- name: assert load balancer created + assert: + that: output.changed + +- name: create load balancer -- idempotent + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_a }}" + public_ip: "{{ pipaname }}" + register: output + +- name: assert no change + assert: + that: + - not output.changed + +- name: delete load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_a }}" + state: absent + register: output + +- name: assert load balancer deleted + assert: + that: output.changed + +- name: delete load balancer (idempotent) + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_a }}" + state: absent + register: output + +- name: assert load balancer deleted (idempotent) + assert: + that: not output.changed + +- name: create another load balancer with more options + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_b }}" + sku: Standard + public_ip_address: "{{ pipbname }}" + probe_protocol: Tcp + probe_port: 80 + probe_interval: 10 + probe_fail_count: 3 + protocol: Tcp + load_distribution: Default + frontend_port: 80 + backend_port: 8080 + idle_timeout: 4 + natpool_frontend_port_start: 30 + natpool_frontend_port_end: 40 + natpool_backend_port: 80 + natpool_protocol: Tcp + register: output + +- name: assert complex load balancer created + assert: + that: + - output.changed + - output.state.sku.name == 'Standard' + +- name: create load balancer again to check idempotency + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_b }}" + sku: Standard + public_ip_address: "{{ pipbname }}" + probe_protocol: Tcp + probe_port: 80 + probe_interval: 10 + 
probe_fail_count: 3 + protocol: Tcp + load_distribution: Default + frontend_port: 80 + backend_port: 8080 + idle_timeout: 4 + natpool_frontend_port_start: 30 + natpool_frontend_port_end: 40 + natpool_backend_port: 80 + natpool_protocol: Tcp + register: output + +- name: assert that output has not changed + assert: + that: + - not output.changed + +- name: create load balancer again to check idempotency - change something + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_b }}" + sku: Standard + public_ip_address: "{{ pipbname }}" + probe_protocol: Tcp + probe_port: 80 + probe_interval: 10 + probe_fail_count: 3 + protocol: Tcp + load_distribution: Default + frontend_port: 81 + backend_port: 8080 + idle_timeout: 4 + natpool_frontend_port_start: 30 + natpool_frontend_port_end: 40 + natpool_backend_port: 80 + natpool_protocol: Tcp + register: output + +- name: assert that output has changed + assert: + that: + - output.changed + +- name: delete load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_b }}" + state: absent + +- name: create load balancer with multiple parameters + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_c1 }}" + frontend_ip_configurations: + - name: frontendipconf0 + public_ip_address: "{{ pipaname }}" + backend_address_pools: + - name: backendaddrpool0 + probes: + - name: prob0 + port: 80 + inbound_nat_pools: + - name: inboundnatpool0 + frontend_ip_configuration_name: frontendipconf0 + protocol: Tcp + frontend_port_range_start: 80 + frontend_port_range_end: 81 + backend_port: 8080 + load_balancing_rules: + - name: lbrbalancingrule0 + frontend_ip_configuration: frontendipconf0 + backend_address_pool: backendaddrpool0 + frontend_port: 80 + backend_port: 80 + probe: prob0 + disable_outbound_snat: True + register: output + +- name: assert complex load balancer created + assert: + that: + - output.changed + +- name: delete load balancer + 
azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_c1 }}" + state: absent + +- name: create load balancer with multiple parameters + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_c2 }}" + frontend_ip_configurations: + - name: frontendipconf0 + public_ip_address: "{{ pipaname }}" + backend_address_pools: + - name: backendaddrpool0 + probes: + - name: prob0 + port: 80 + load_balancing_rules: + - name: lbrbalancingrule0 + frontend_ip_configuration: frontendipconf0 + backend_address_pool: backendaddrpool0 + frontend_port: 80 + backend_port: 80 + probe: prob0 + disable_outbound_snat: True + inbound_nat_rules: + - name: inboundnatrule0 + backend_port: 8080 + protocol: Tcp + frontend_port: 8080 + frontend_ip_configuration: frontendipconf0 + register: output + +- name: assert complex load balancer created + assert: + that: output.changed + +- name: delete load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_c2 }}" + state: absent + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ lbvnname }}" + address_prefixes: "10.10.0.0/16" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "lb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}sb" + address_prefix: "10.10.0.0/24" + virtual_network: "{{ lbvnname }}" + register: subnet + +- name: create internal loadbalancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_d }}" + sku: Standard + frontend_ip_configurations: + - name: frontendipconf0 + private_ip_address: 10.10.0.10 + private_ip_allocation_method: Static + subnet: "{{ subnet.state.id }}" + zones: + - 1 + - 2 + - 3 + backend_address_pools: + - name: backendaddrpool0 + probes: + - name: prob0 + port: 80 + inbound_nat_pools: + - name: inboundnatpool0 + frontend_ip_configuration_name: frontendipconf0 + 
protocol: Tcp + frontend_port_range_start: 80 + frontend_port_range_end: 81 + backend_port: 8080 + load_balancing_rules: + - name: lbrbalancingrule0 + frontend_ip_configuration: frontendipconf0 + backend_address_pool: backendaddrpool0 + frontend_port: 80 + backend_port: 80 + probe: prob0 + disable_outbound_snat: False + register: output + +- name: assert complex load balancer created + assert: + that: + - output.changed + - output.state.frontend_ip_configurations[0].zones | length == 3 + +- name: delete load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "{{ lbname_d }}" + state: absent + +- name: cleanup public ip + azure_rm_publicipaddress: + name: "{{ item }}" + resource_group: '{{ resource_group }}' + state: absent + with_items: + - "{{ pipaname }}" + - "{{ pipbname }}" + +- name: cleanup subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "lb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}sb" + virtual_network: "{{ lbvnname }}" + state: absent + +- name: cleanup virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ lbvnname }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/aliases new file mode 100644 index 000000000..670aa52ec --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group4 +destructive +azure_rm_loganalyticsworkspace +azure_rm_loganalyticsworkspace_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml new file mode 100644 index 
000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml new file mode 100644 index 000000000..3c9e1d493 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml @@ -0,0 +1,183 @@ +- name: Prepare random number + set_fact: + name: "loganalytics{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + retention_days: 60 + +- name: Create Log Analytics Workspace (Check Mode On) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + intelligence_packs: + Backup: true + Containers: true + retention_in_days: "{{ retention_days }}" + resource_group: "{{ resource_group }}" + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Get Log Analytics workspace information + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + show_intelligence_packs: true + show_management_groups: true + show_shared_keys: true + show_usages: true + register: facts + +- assert: + that: + - facts.workspaces | length == 0 + +- name: Create Log Analytics Workspace + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + intelligence_packs: + Backup: true + Containers: true + retention_in_days: 30 + resource_group: "{{ resource_group }}" + tags: + key1: value1 + register: output + +- assert: + that: + - output.changed + - output.retention_in_days == 30 + # - output.intelligence_packs | json_query('[?name == `Backup`].enabled') | first == true + # - output.intelligence_packs | json_query('[?name == `Containers`].enabled') | first == true + - output.sku 
== 'per_gb2018' + - output.tags.key1 == 'value1' + +- name: Update Log Analytics Workspace + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + intelligence_packs: + Backup: true + Containers: true + retention_in_days: "{{ retention_days }}" + resource_group: "{{ resource_group }}" + tags: + key1: value1 + key2: value2 + register: output + +- assert: + that: + - output.changed + - output.retention_in_days == retention_days + - output.tags.key2 == 'value2' + +- name: Get Log Analytics workspace information (Show full information) + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + show_intelligence_packs: true + show_management_groups: true + show_shared_keys: true + show_usages: true + register: facts + +- assert: + that: + - not facts.changed + - facts.workspaces | length > 0 + - facts.workspaces[0].retention_in_days == retention_days + - facts.workspaces[0].intelligence_packs | length > 0 + # - facts.workspaces[0].intelligence_packs | json_query('[?name == `Backup`].enabled') | first == true + # - facts.workspaces[0].intelligence_packs | json_query('[?name == `Containers`].enabled') | first == true + - facts.workspaces[0].shared_keys is defined + - facts.workspaces[0].shared_keys.primary_shared_key is defined + - facts.workspaces[0].shared_keys.secondary_shared_key is defined + - facts.workspaces[0].usages is defined + - facts.workspaces[0].usages | length > 0 + - facts.workspaces[0].management_groups is defined + - facts.workspaces[0].sku == 'per_gb2018' + +- name: Get Log Analytics workspace information (Show default information) + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - not facts.changed + - facts.workspaces | length > 0 + - facts.workspaces[0].retention_in_days == retention_days + - facts.workspaces[0].intelligence_packs is not defined + - facts.workspaces[0].shared_keys is not defined + - 
facts.workspaces[0].usages is not defined + - facts.workspaces[0].management_groups is not defined + - facts.workspaces[0].sku == 'per_gb2018' + +- name: Create Log Analytics workspace (Test Idempotence) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + retention_in_days: "{{ retention_days }}" + register: output + +- assert: + that: + - not output.changed + +- name: Remove Log Analytics workspace (Check Mode On) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Get Log Analytics workspace information(Check still exists after remove Check Mode On) + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - facts.workspaces | length == 1 + +- name: Remove Log Analytics workspace + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + force: true + register: output + +- assert: + that: + - output.changed + +- name: Get Log Analytics workspace information + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - facts.workspaces | length == 0 + +- name: Remove Log Analytics workspace (Test Idempotence) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + force: true + register: output + +- assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/aliases new file mode 100644 index 000000000..bf20c612b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/aliases @@ 
-0,0 +1,4 @@ +cloud/azure +shippable/azure/group4 +destructive +azure_rm_manageddisk_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/tasks/main.yml new file mode 100644 index 000000000..ce6b4e6b6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_manageddisk/tasks/main.yml @@ -0,0 +1,260 @@ + - name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + parameter: {} + run_once: yes + + - name: Clearing (if) previous disks were created + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}{{ item }}" + managed_by: '' + state: absent + with_items: + - 1 + - 2 + - 3 + + - name: Test invalid account name (should give error) + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "invalid_char$" + disk_size_gb: 1 + register: output + ignore_errors: yes + check_mode: no + + - name: Assert task failed + assert: { that: "output['failed'] == True" } + + - name: Create managed disk (Check Mode) + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}1" + storage_account_type: "Standard_LRS" + disk_size_gb: 1 + tags: + testing: testing + delete: never + register: output + check_mode: yes + + - name: Assert status succeeded (Check Mode) + assert: + that: + - output.changed + - output.state + + - name: Create 
new managed disk successfully + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}1" + storage_account_type: "Standard_LRS" + disk_size_gb: 1 + tags: + testing: testing + delete: never + register: output + + - name: Assert status succeeded and results include an Id value + assert: + that: + - output.changed + - output.state.disk_size_gb == 1 + - output.state.id is defined + - output.state.os_type == None + - output.state.storage_account_type == "Standard_LRS" + + - name: Copy disk to a new managed disk + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}2" + create_option: "copy" + source_uri: "{{ output.state.id }}" + disk_size_gb: 1 + register: disk2 + + - name: Assert status succeeded and results include an Id value + assert: + that: + - disk2.changed + - disk2.state.id is defined + + - name: Create disk to a new managed disk with zone and os type + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}3" + source_uri: "{{ output.state.id }}" + disk_size_gb: 1 + zone: "1" + os_type: windows + register: disk3 + + - name: Assert status succeeded and results include an Id value + assert: + that: + - disk3.changed + - disk3.state.id is defined + - disk3.state.zone == "1" + - disk3.state.os_type == "windows" + + - name: Change storage account type to an invalid type + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}1" + storage_account_type: "PremiumL" + disk_size_gb: 1 + register: output + ignore_errors: yes + + - name: Assert storage account type change failed + assert: { that: "output['failed'] == True" } + + - name: Update disk options (os_type, account_type, size, tags) + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}1" + disk_size_gb: 2 + os_type: linux + storage_account_type: "StandardSSD_LRS" + tags: + galaxy: "no" + delete: never + register: output + + - assert: + that: + - output.changed + 
- output.state.storage_account_type == "StandardSSD_LRS" + - output.state.disk_size_gb == 2 + - "output.state.tags | length == 2" + - "output.state.tags.galaxy == 'no'" + - output.state.os_type == 'linux' + + - name: Gather facts to one specific disk + azure_rm_manageddisk_info: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}1" + register: output + + - assert: + that: + - "output.ansible_info.azure_managed_disk | length == 1" + - output.ansible_info.azure_managed_disk[0].storage_account_type == "StandardSSD_LRS" + - output.ansible_info.azure_managed_disk[0].disk_size_gb == 2 + - "output.ansible_info.azure_managed_disk[0].os_type == 'linux'" + + - set_fact: + parameter: "{{parameter |combine({item.key: item.value})}}" + when: "{{item.key not in ['id', 'changed'] and item.value != None}}" + with_dict: "{{ output.ansible_info.azure_managed_disk[0] }}" + + - name: Create disk with facts return value + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + location: "{{ parameter.location }}" + name: "{{ parameter.name }}" + storage_account_type: "{{ parameter.storage_account_type }}" + disk_size_gb: "{{ parameter.disk_size_gb }}" + create_option: "{{ parameter.create_option }}" + tags: "{{ parameter.tags }}" + register: output + + - assert: + that: + - not output.changed + + - name: Create new managed disk with I(account_type=StandardSSD_ZRS) + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}4" + storage_account_type: "StandardSSD_ZRS" + disk_size_gb: 2 + location: westus2 + register: output + + - assert: + that: + - output.changed + - output.state.storage_account_type == "StandardSSD_ZRS" + - output.state.disk_size_gb == 2 + + - name: Gather facts to one specific disk + azure_rm_manageddisk_info: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}4" + register: output + + - assert: + that: + - "output.ansible_info.azure_managed_disk | length == 1" + - 
output.ansible_info.azure_managed_disk[0].storage_account_type == "StandardSSD_ZRS" + - output.ansible_info.azure_managed_disk[0].disk_size_gb == 2 + + - name: Create new managed disk with I(account_type=Premium_ZRS) + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}5" + storage_account_type: "Premium_ZRS" + disk_size_gb: 2 + location: westus2 + register: output + + - assert: + that: + - output.changed + - output.state.storage_account_type == "Premium_ZRS" + - output.state.disk_size_gb == 2 + + - name: Gather facts to one specific disk + azure_rm_manageddisk_info: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}5" + register: output + + - assert: + that: + - "output.ansible_info.azure_managed_disk | length == 1" + - output.ansible_info.azure_managed_disk[0].storage_account_type == "Premium_ZRS" + - output.ansible_info.azure_managed_disk[0].disk_size_gb == 2 + + - name: Delete managed disk (Check Mode) + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}1" + state: absent + register: output + check_mode: yes + + - name: Assert status succeeded + assert: + that: + - output.changed + - output.state + + - name: Delete all managed disk + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "md{{ rpfx }}{{ item }}" + managed_by: '' + state: absent + with_items: + - 1 + - 2 + - 3 + - 4 + - 5 + + - name: Delete virtual machine + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "tr{{ rpfx }}" + state: absent + vm_size: Standard_DS1_v2 + + - name: Delete public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: "tr{{ rpfx }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/aliases new file mode 100644 index 
000000000..02ab1ed40 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/tasks/main.yml new file mode 100644 index 000000000..3a6c03649 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_managementgroup/tasks/main.yml @@ -0,0 +1,35 @@ +- name: Get list of all managementgroups + azure_rm_managementgroup_info: + register: az_all_managementgroups + +- name: Get a managementgroup by id + azure_rm_managementgroup_info: + id: "{{ az_all_managementgroups.management_groups[0].id }}" + recurse: True + flatten: True + children: True + register: az_recursive_managementgroups + +- name: Get a managementgroup by name + azure_rm_managementgroup_info: + name: "{{ az_all_managementgroups.management_groups[0].name }}" + recurse: True + flatten: True + +- name: Test invalid name id combo + azure_rm_managementgroup_info: + name: "{{ az_all_managementgroups.management_groups[0].name }}" + id: "{{ az_all_managementgroups.management_groups[0].id }}" + register: invalid_name + ignore_errors: yes + +- name: Validate expected states + assert: + that: + - invalid_name['failed'] + +- name: Validate expected attributes + assert: + that: + - "{{ item.id is defined }}" + loop: "{{ 
az_recursive_managementgroups.management_groups + az_recursive_managementgroups.subscriptions }}" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases new file mode 100644 index 000000000..b586dc7c3 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases @@ -0,0 +1,8 @@ +cloud/azure +destructive +shippable/azure/group9 +azure_rm_mariadbserver_facts +azure_rm_mariadbdatabase +azure_rm_mariadbdatabase_facts +azure_rm_mariadbfirewallrule +azure_rm_mariadbfirewallrule_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/tasks/main.yml new file mode 100644 index 000000000..14d101a9a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/tasks/main.yml @@ -0,0 +1,640 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create instance of MariaDB Server -- check mode + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: 
Password123! + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + +- name: Create again instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.state == 'Ready' + +- name: Update instance of MariaDB Server, change storage size + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 128000 + version: 10.2 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! 
+ register: output +- name: Assert the state has not changed + assert: + that: + - output.changed + - output.state == 'Ready' +- debug: + var: output + +- name: Gather facts MariaDB Server + azure_rm_mariadbserver_info: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that storage size is correct + assert: + that: + - output.servers[0]['storage_mb'] == 128000 + +- name: Create second instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.3 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + tags: + aaa: bbb + +- name: Create second instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + version: 10.3 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! 
+ tags: + ccc: ddd + +- name: Gather facts MariaDB Server + azure_rm_mariadbserver_info: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[0]['tags']['aaa'] == 'bbb' + - output.servers[0]['tags']['ccc'] == 'ddd' + +- name: Gather facts MariaDB Server + azure_rm_mariadbserver_info: + resource_group: "{{ resource_group }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[1]['id'] != None + - output.servers[1]['name'] != None + - output.servers[1]['location'] != None + - output.servers[1]['sku']['name'] != None + - output.servers[1]['sku']['tier'] != None + - output.servers[1]['sku']['capacity'] != None + - output.servers[1]['version'] != None + - output.servers[1]['user_visible_state'] != None + - output.servers[1]['fully_qualified_domain_name'] != None + +# +# azure_rm_mariadbdatabase tests below +# +- name: Create instance of MariaDB Database -- check mode + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: 
mariadbsrv{{ rpfx }} + name: testdatabase + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: latin1_swedish_ci + charset: latin1 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create again instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: latin1_swedish_ci + charset: latin1 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.name == 'testdatabase' + +- name: Try to update database without force_update + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: utf8_general_ci + charset: utf8 + ignore_errors: yes + register: output +- name: Assert that nothing has changed + assert: + that: + - output.changed == False + +- name: Update instance of database using force_update + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + collation: utf8_general_ci + charset: utf8 + force_update: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create second instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase2 + +- name: Gather facts MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + register: 
output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + +- name: Gather facts MariaDB Database + azure_rm_mariadbdatabase_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + - output.databases[1]['server_name'] != None + - output.databases[1]['name'] != None + - output.databases[1]['charset'] != None + - output.databases[1]['collation'] != None + +- name: Delete instance of MariaDB Database -- check mode + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of MariaDB Database + azure_rm_mariadbdatabase: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +# +# azure_rm_firewallrule tests below +# +- name: Create instance of Firewall Rule -- check mode + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: 
firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create again instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule -- check mode + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule -- second + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Gather facts MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - 
output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - "output.rules | length == 1" + +- name: Gather facts MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - output.rules[1].id != None + - output.rules[1].name != None + - output.rules[1].start_ip_address != None + - output.rules[1].end_ip_address != None + - "output.rules | length == 2" + +- name: Delete instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Firewall Rule + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule - second + azure_rm_mariadbfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + state: absent + +- name: Gather facts MariaDB Firewall Rule + azure_rm_mariadbfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that empty list was returned + assert: + that: + - output.changed == False + - "output.rules | length == 0" + +# +# configuration +# +- name: Create instance 
of Configuration -- check mode + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + value: "ON" + check_mode: yes + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Gather facts of default configuration + azure_rm_mariadbconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + register: output +- name: Get facts of event_scheduler + debug: + var: output + +- name: Try to delete default configuration + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Try to change default configuration + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + value: "ON" + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to change default configuration -- idempotent + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + value: "ON" + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Try to reset configuration + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to reset configuration -- idempotent + azure_rm_mariadbconfiguration: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that 
change was registered + assert: + that: + - not output.changed + +- name: Gather facts MariaDB Configuration + azure_rm_mariadbconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + name: event_scheduler + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length == 1 + +- name: Gather facts MariaDB Configuration + azure_rm_mariadbconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: mariadbsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length > 1 + +# +# clean up azure_rm_mariadbserver test +# + +- name: Delete instance of MariaDB Server -- check mode + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of MariaDB Server + azure_rm_mariadbserver: + resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete second instance of MariaDB Server + azure_rm_mariadbserver: + 
resource_group: "{{ resource_group }}" + name: mariadbsrv{{ rpfx }}second + state: absent + async: 400 + poll: 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/aliases new file mode 100644 index 000000000..35b940115 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +unsupported \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/tasks/main.yml new file mode 100644 index 000000000..a725a0b33 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitordiagnosticsetting/tasks/main.yml @@ -0,0 +1,504 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +# ------ setup ------ + +- name: create virtual network + azure_rm_virtualnetwork: + name: "vnet-{{ rpfx }}" + resource_group: "{{ resource_group }}" + address_prefixes: "10.0.0.0/16" + register: vnet_output + +- name: create web app + azure_rm_webapp: + name: "webapp-{{ rpfx }}" + resource_group: "{{ resource_group }}" + plan: + name: 
"webapp-{{ rpfx }}-plan" + resource_group: "{{ resource_group }}" + is_linux: false + sku: S1 + register: webapp_output + +- name: create storage account + azure_rm_storageaccount: + name: "storage{{ rpfx }}" + resource_group: "{{ resource_group }}" + account_type: Standard_LRS + kind: StorageV2 + register: storage_output + +- name: create second storage account + azure_rm_storageaccount: + name: "storagesecond{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + account_type: Standard_LRS + kind: StorageV2 + register: storage2_output + +- name: create event hub namespace + azure_rm_eventhub: + namespace_name: "hub-{{ rpfx }}" + resource_group: "{{ resource_group }}" + sku: "Basic" + +- name: create log analytics workspace + azure_rm_loganalyticsworkspace: + name: "analytics-{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + retention_in_days: 30 + register: analytics_output + +# ------ tests ------ + +- name: get diagnostic settings for non-existant resource + azure_rm_monitordiagnosticsetting_info: + resource: "123abc" + register: output + failed_when: '"failed" not in output.msg | lower' + +- name: get diagnostic settings for virtual network + azure_rm_monitordiagnosticsetting_info: + resource: "{{ vnet_output.state.id }}" + register: output +- name: assert no settings + assert: + that: + - not output.changed + - output.settings | length == 0 + +- name: create storage-based diagnostic setting for vnet (check mode) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + storage_account: "{{ storage_output.state.id }}" + logs: + - category_group: "allLogs" + metrics: + - category: "AllMetrics" + check_mode: true + register: output +- name: assert resource created + assert: + that: + - output.changed + +- name: create storage-based diagnostic setting for vnet (actually create) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + storage_account: 
"{{ storage_output.state.id }}" + logs: + - category_group: "allLogs" + metrics: + - category: "AllMetrics" + register: output +- name: assert resource created + assert: + that: + - output.changed + - output.state.name == 'logs-storage' + - output.state.storage_account.id == storage_output.state.id + - output.state.logs | length == 1 + - output.state.logs[0].category_group == 'allLogs' + - output.state.logs[0].enabled + - output.state.metrics | length == 1 + - output.state.metrics[0].category == 'AllMetrics' + - output.state.metrics[0].enabled + - not output.state.event_hub + - not output.state.log_analytics + +- name: create storage-based diagnostic setting for vnet (idempotent) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + storage_account: "{{ storage_output.state.id }}" + logs: + - category_group: "allLogs" + metrics: + - category: "AllMetrics" + check_mode: true + register: output +- name: assert resource not changed + assert: + that: + - not output.changed + +- name: create storage-based diagnostic setting for vnet by resource dict (idempotent) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: + name: "vnet-{{ rpfx }}" + type: "Microsoft.Network/virtualNetworks" + resource_group: "{{ resource_group }}" + storage_account: "{{ storage_output.state.id }}" + logs: + - category_group: "allLogs" + metrics: + - category: "AllMetrics" + register: output +- name: assert resource not changed + assert: + that: + - not output.changed + +- name: update storage-based diagnostic setting for vnet + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + storage_account: "{{ storage_output.state.id }}" + logs: + - category_group: "allLogs" + register: output +- name: assert resource updated + assert: + that: + - output.changed + - output.state.name == 'logs-storage' + - output.state.storage_account.id == storage_output.state.id + - output.state.logs | 
length == 1 + - output.state.logs[0].category_group == 'allLogs' + - output.state.logs[0].enabled + - output.state.metrics | length == 0 + - not output.state.event_hub + - not output.state.log_analytics + +- name: create second storage-based diagnostic setting for vnet + azure_rm_monitordiagnosticsetting: + name: "logs-storage2" + resource: "{{ vnet_output.state.id }}" + storage_account: + name: "storagesecond{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + logs: + - category: "VMProtectionAlerts" + retention_policy: + days: 15 + register: output +- name: assert resource created + assert: + that: + - output.changed + - output.state.name == 'logs-storage2' + - output.state.storage_account.id == storage2_output.state.id + - output.state.logs | length == 1 + - output.state.logs[0].category == 'VMProtectionAlerts' + - output.state.logs[0].enabled + - output.state.logs[0].retention_policy.days == 15 + - output.state.logs[0].retention_policy.enabled + - output.state.metrics | length == 0 + - not output.state.event_hub + - not output.state.log_analytics + +- name: update second storage-based diagnostic setting for vnet + azure_rm_monitordiagnosticsetting: + name: "logs-storage2" + resource: "{{ vnet_output.state.id }}" + storage_account: + name: "storagesecond{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + logs: + - category: "VMProtectionAlerts" + retention_policy: + days: 30 + register: output +- name: assert resource created + assert: + that: + - output.changed + - output.state.logs[0].retention_policy.days == 30 + +- name: update second storage-based diagnostic setting for vnet (idempotent) + azure_rm_monitordiagnosticsetting: + name: "logs-storage2" + resource: "{{ vnet_output.state.id }}" + storage_account: + name: "storagesecond{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + logs: + - category: "VMProtectionAlerts" + retention_policy: + days: 30 + register: output +- name: assert resource not changed + assert: + 
that: + - not output.changed + +- name: get diagnostic settings for virtual network + azure_rm_monitordiagnosticsetting_info: + resource: "{{ vnet_output.state.id }}" + register: output +- name: assert output matches expectations + assert: + that: + - output.settings | length == 2 + - output.settings[0].name == 'logs-storage' + - output.settings[0].storage_account.id == storage_output.state.id + - output.settings[0].logs | length == 1 + - output.settings[0].logs[0].category_group == 'allLogs' + - output.settings[0].logs[0].enabled + - output.settings[0].metrics | length == 0 + - not output.settings[0].event_hub + - not output.settings[0].log_analytics + - output.settings[1].name == 'logs-storage2' + - output.settings[1].storage_account.id == storage2_output.state.id + - output.settings[1].logs | length == 1 + - output.settings[1].logs[0].category == 'VMProtectionAlerts' + - output.settings[1].logs[0].enabled + - output.settings[1].logs[0].retention_policy.days == 30 + - output.settings[1].logs[0].retention_policy.enabled + - output.settings[1].metrics | length == 0 + - not output.settings[1].event_hub + - not output.settings[1].log_analytics + +- name: get specific diagnostic settings for virtual network + azure_rm_monitordiagnosticsetting_info: + name: "logs-storage2" + resource: "{{ vnet_output.state.id }}" + register: output +- name: assert output matches expectations + assert: + that: + - output.settings | length == 1 + - output.settings[0].name == 'logs-storage2' + - output.settings[0].storage_account.id == storage2_output.state.id + - output.settings[0].logs | length == 1 + - output.settings[0].logs[0].category == 'VMProtectionAlerts' + - output.settings[0].logs[0].enabled + - output.settings[0].logs[0].retention_policy.days == 30 + - output.settings[0].logs[0].retention_policy.enabled + - output.settings[0].metrics | length == 0 + - not output.settings[0].event_hub + - not output.settings[0].log_analytics + +- name: get non-existent diagnostic setting for 
virtual network + azure_rm_monitordiagnosticsetting_info: + name: "does-not-exist" + resource: "{{ vnet_output.state.id }}" + register: output +- name: assert no settings + assert: + that: + - output.settings | length == 0 + +- name: create diagnostic setting for webapp with log analytics, event hub, and storage + azure_rm_monitordiagnosticsetting: + name: "logs" + resource: + name: "webapp-{{ rpfx }}" + type: "Microsoft.Web/sites" + resource_group: "{{ resource_group }}" + event_hub: + namespace: "hub-{{ rpfx }}" + policy: "RootManageSharedAccessKey" + log_analytics: + name: "analytics-{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + storage_account: + name: "storage{{ rpfx }}" + logs: + - category: "AppServiceHTTPLogs" + - category: "AppServiceConsoleLogs" + - category: "AppServiceAppLogs" + - category: "AppServiceAuditLogs" + - category: "AppServiceIPSecAuditLogs" + - category: "AppServicePlatformLogs" + register: output +- name: assert resource created + assert: + that: + - output.changed + - output.state.name == 'logs' + - output.state.storage_account.id == storage_output.state.id + - output.state.event_hub.namespace == 'hub-{{ rpfx }}' + - output.state.event_hub.policy == 'RootManageSharedAccessKey' + - output.state.log_analytics.id | lower == analytics_output.id | lower + - output.state.logs | length == 6 + - output.state.logs[0].category == 'AppServiceHTTPLogs' + - output.state.logs[1].category == 'AppServiceConsoleLogs' + - output.state.logs[2].category == 'AppServiceAppLogs' + - output.state.logs[3].category == 'AppServiceAuditLogs' + - output.state.logs[4].category == 'AppServiceIPSecAuditLogs' + - output.state.logs[5].category == 'AppServicePlatformLogs' + - output.state.metrics | length == 0 + +- name: create diagnostic setting for webapp with log analytics, event hub, and storage (idempotent) + azure_rm_monitordiagnosticsetting: + name: "logs" + resource: + name: "webapp-{{ rpfx }}" + type: "Microsoft.Web/sites" + resource_group: "{{ 
resource_group }}" + event_hub: + namespace: "hub-{{ rpfx }}" + policy: "RootManageSharedAccessKey" + log_analytics: + name: "analytics-{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + storage_account: + name: "storage{{ rpfx }}" + logs: + - category: "AppServiceHTTPLogs" + - category: "AppServiceConsoleLogs" + - category: "AppServiceAppLogs" + - category: "AppServiceAuditLogs" + - category: "AppServiceIPSecAuditLogs" + - category: "AppServicePlatformLogs" + register: output +- name: assert resource created + assert: + that: + - not output.changed + +- name: update diagnostic setting to remove storage and log category + azure_rm_monitordiagnosticsetting: + name: "logs" + resource: + name: "webapp-{{ rpfx }}" + type: "Microsoft.Web/sites" + resource_group: "{{ resource_group }}" + event_hub: + namespace: "hub-{{ rpfx }}" + policy: "RootManageSharedAccessKey" + log_analytics: + name: "analytics-{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + logs: + - category: "AppServiceHTTPLogs" + - category: "AppServiceAppLogs" + - category: "AppServiceAuditLogs" + - category: "AppServiceIPSecAuditLogs" + - category: "AppServicePlatformLogs" + register: output +- name: assert resource updated + assert: + that: + - output.changed + - not output.state.storage_account + - output.state.logs | length == 5 + - output.state.logs[0].category == 'AppServiceHTTPLogs' + - output.state.logs[1].category == 'AppServiceAppLogs' + - output.state.logs[2].category == 'AppServiceAuditLogs' + - output.state.logs[3].category == 'AppServiceIPSecAuditLogs' + - output.state.logs[4].category == 'AppServicePlatformLogs' + +- name: delete diagnostic setting via resource dict + azure_rm_monitordiagnosticsetting: + name: "logs" + resource: + name: "webapp-{{ rpfx }}" + type: "Microsoft.Web/sites" + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: assert resource delete + assert: + that: + - output.changed + +- name: delete diagnostic 
setting via resource dict (idempotent) + azure_rm_monitordiagnosticsetting: + name: "logs" + resource: + name: "webapp-{{ rpfx }}" + type: "Microsoft.Web/sites" + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: assert resource delete + assert: + that: + - not output.changed + +- name: delete diagnostic setting (check mode) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + state: "absent" + check_mode: true + register: output +- name: assert resource deleted + assert: + that: + - output.changed + +- name: delete diagnostic setting (actually delete) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + state: "absent" + register: output +- name: assert resource deleted + assert: + that: + - output.changed + +- name: delete diagnostic setting (idempotent) + azure_rm_monitordiagnosticsetting: + name: "logs-storage" + resource: "{{ vnet_output.state.id }}" + state: "absent" + register: output +- name: assert resource already deleted + assert: + that: + - not output.changed + +- name: delete second diagnostic setting + azure_rm_monitordiagnosticsetting: + name: "logs-storage2" + resource: "{{ vnet_output.state.id }}" + state: "absent" + register: output +- name: assert resource deleted + assert: + that: + - output.changed + +# ------ teardown ------ + +- name: delete log analytics workspace + azure_rm_loganalyticsworkspace: + name: "analytics-{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + state: "absent" + +- name: delete event hub namespace + azure_rm_eventhub: + namespace_name: "hub-{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" + +- name: delete second storage account + azure_rm_storageaccount: + name: "storagesecond{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + state: "absent" + +- name: delete storage account + azure_rm_storageaccount: + name: "storage{{ rpfx }}" + 
resource_group: "{{ resource_group }}" + state: "absent" + +- name: delete web app + azure_rm_webapp: + name: "webapp-{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" + +- name: delete web app plan + azure_rm_appserviceplan: + name: "webapp-{{ rpfx }}-plan" + resource_group: "{{ resource_group }}" + state: "absent" + +- name: delete virtual network + azure_rm_virtualnetwork: + name: "vnet-{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/aliases new file mode 100644 index 000000000..35b940115 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +unsupported \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml new file mode 100644 index 000000000..dc30caf98 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml @@ -0,0 +1,133 @@ +- name: Prepare random number + set_fact: + storage_name: "storage{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}" + profile_name: "profile{{ 
resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}" + location: eastus + run_once: yes + +- name: create Azure storage account + azure_rm_storageaccount: + name: '{{ storage_name }}' + resource_group: "{{ resource_group }}" + account_type: Standard_LRS + +- name: create log profile (check mode) + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + location: "{{ location }}" + locations: + - eastus + - westus + categories: + - Write + - Action + retention_policy: + enabled: False + days: 1 + storage_account: + resource_group: "{{ resource_group }}" + name: "{{ storage_name }}" + check_mode: yes + register: output + +- name: assert create check mode + assert: + that: + - output.changed + +- name: create log profile + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + location: "{{ location }}" + locations: + - eastus + - westus + categories: + - Write + - Action + retention_policy: + enabled: False + days: 1 + storage_account: + resource_group: "{{ resource_group }}" + name: "{{ storage_name }}" + register: output +- name: assert create + assert: + that: + - output.changed + - output.id + +- name: update log profile (idempotence) + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + location: "{{ location }}" + locations: + - eastus + - westus + categories: + - Write + - Action + retention_policy: + enabled: False + days: 1 + storage_account: + resource_group: "{{ resource_group }}" + name: "{{ storage_name }}" + register: output +- name: assert update idempotence + assert: + that: + - not output.changed + +- name: update log profile + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + location: "{{ location }}" + locations: + - eastus + categories: + - Write + - Action + retention_policy: + enabled: False + days: 2 + storage_account: + resource_group: "{{ resource_group }}" + name: "{{ storage_name }}" + register: output +- name: assert update + assert: + that: + - output.changed + +- name: delete log profile 
(check mode) + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + state: absent + register: output + check_mode: true +- name: assert delete + assert: + that: + - output.changed + +- name: delete log profile + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + state: absent + register: output +- name: assert delete + assert: + that: + - output.changed + +- name: delete log profile (idempotence) + azure_rm_monitorlogprofile: + name: "{{ profile_name }}" + state: absent + register: output +- name: assert delete + assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/aliases new file mode 100644 index 000000000..30f78fd74 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group12 +destructive +azure_rm_manageddisk_info +azure_rm_multiplemanageddisks diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/defaults/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/defaults/main.yml new file mode 100644 index 000000000..18b02c3b1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/defaults/main.yml @@ -0,0 +1,30 @@ +--- +async_virtual_network: "async-vnet" +async_subnet: "async-subnet" +async_virtual_network_cidr: "172.10.0.0/16" +async_subnet_cidr: "172.10.0.0/24" +async_virtual_machine_prefix: "async-vm" +async_number_virtual_machine: 3 +async_number_disk_to_attach: 3 + +shared_virtual_network: "shareddisk-vnet" +shared_subnet: "shareddisk-subnet" +shared_virtual_network_cidr: "10.10.0.0/16" +shared_subnet_cidr: "10.10.0.0/24" +shared_virtual_machine_prefix: "shareddisk-vm" 
+shared_virtual_machines: + - name: "shareddisk-vm-1" + resource_group: "{{ resource_group_secondary }}" + - name: "shareddisk-vm-2" + resource_group: "{{ resource_group_secondary }}" +shared_disks: + - resource_group: "{{ resource_group_secondary }}" + name: "shareddisk-01" + disk_size_gb: 4 + storage_account_type: StandardSSD_LRS + max_shares: 3 + - resource_group: "{{ resource_group_secondary }}" + name: "shareddisk-02" + disk_size_gb: 4 + storage_account_type: StandardSSD_LRS + max_shares: 3 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/main.yml new file mode 100644 index 000000000..78b712653 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/main.yml @@ -0,0 +1,2 @@ +- include_tasks: tasks/test_shared.yml +- include_tasks: tasks/test_async.yml diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_async.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_async.yml new file mode 100644 index 000000000..448a54dde --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_async.yml @@ -0,0 +1,160 @@ +# setup: create virtual machines to attach disks on +- name: Create virtual network 
+ azure_rm_virtualnetwork: + resource_group: "{{ resource_group_secondary }}" + name: "{{ async_virtual_network }}-{{ item }}" + address_prefixes: "{{ async_virtual_network_cidr }}" + with_sequence: start=1 end="{{ async_number_virtual_machine }}" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group_secondary }}" + name: "{{ async_subnet }}-{{ item }}" + address_prefix: "{{ async_subnet_cidr }}" + virtual_network: "{{ async_virtual_network }}-{{ item }}" + with_sequence: start=1 end="{{ async_number_virtual_machine }}" + +- name: Create virtual machines + azure_rm_virtualmachine: + resource_group: "{{ resource_group_secondary }}" + name: "{{ async_virtual_machine_prefix }}-{{ item }}" + vm_size: Standard_E2_v3 + managed_disk_type: Standard_LRS + virtual_network: "{{ async_virtual_network }}-{{ item }}" + admin_username: adminuser + admin_password: "!test123@" + os_type: Linux + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + tags: + key: value + with_sequence: start=1 end="{{ async_number_virtual_machine }}" + register: create_vms + async: 1000 + poll: 0 + +- name: Wait for Virtual machine to be created + async_status: + jid: "{{ item.ansible_job_id }}" + register: wait_create_vm + until: wait_create_vm.finished + retries: 100 + delay: 5 + loop: "{{ create_vms.results }}" + +- name: Retrieve running virtual machine + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group_secondary }}" + tags: + - key + register: _vms + +- set_fact: + attach_disk_config: "{{ lookup('template', 'disk_config.j2') | from_yaml }}" + vars: + virtual_machines: "{{ _vms.vms }}" + +- name: Create and Attach disks to virtual machine + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ item.disks }}" + managed_by_extended: + - "{{ item.virtual_machine }}" + register: azure_disks + async: 1000 + poll: 0 + with_items: "{{ attach_disk_config }}" + +- name: Wait for disks to be created and attached + 
async_status: + jid: "{{ item.ansible_job_id }}" + register: attach_disk + until: attach_disk.finished + retries: 100 + delay: 5 + loop: "{{ azure_disks.results }}" + +- name: Get disk info + azure_rm_manageddisk_info: + name: "{{ item.name }}" + resource_group: "{{ item.resource_group }}" + register: disks_info + with_items: "{{ attach_disk_config | map(attribute='disks') | flatten | list }}" + +- name: Validate that disks are attached to VMs + assert: + that: + - result.ansible_info.azure_managed_disk[0].managed_by + with_items: "{{ disks_info.results }}" + loop_control: + loop_var: result + +- name: Detach disks from virtual machine + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ item.disks }}" + managed_by_extended: + - "{{ item.virtual_machine }}" + state: absent + register: azure_disks + async: 1000 + poll: 0 + with_items: "{{ attach_disk_config }}" + +- name: Wait for disks to be detached + async_status: + jid: "{{ item.ansible_job_id }}" + register: attach_disk + until: attach_disk.finished + retries: 100 + delay: 5 + loop: "{{ azure_disks.results }}" + +- name: Get disk info + azure_rm_manageddisk_info: + name: "{{ item.name }}" + resource_group: "{{ item.resource_group }}" + register: disks_info + with_items: "{{ attach_disk_config | map(attribute='disks') | flatten | list }}" + +- name: Validate that disks are detached from VMs + assert: + that: + - not result.ansible_info.azure_managed_disk[0].managed_by + with_items: "{{ disks_info.results }}" + loop_control: + loop_var: result + +- name: Delete managed disks + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ item.disks }}" + state: absent + register: azure_disks + async: 1000 + poll: 0 + with_items: "{{ attach_disk_config }}" + +- name: Wait for disks to be deleted + async_status: + jid: "{{ item.ansible_job_id }}" + register: attach_disk + until: attach_disk.finished + retries: 100 + delay: 5 + loop: "{{ azure_disks.results }}" + 
+- name: Get disk info + azure_rm_manageddisk_info: + name: "{{ item.name }}" + resource_group: "{{ item.resource_group }}" + register: disks_info + with_items: "{{ attach_disk_config | map(attribute='disks') | flatten | list }}" + +- name: Validate that disks are attached to VMs + assert: + that: + - result.ansible_info.azure_managed_disk == [] + with_items: "{{ disks_info.results }}" + loop_control: + loop_var: result diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_shared.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_shared.yml new file mode 100644 index 000000000..853ef8ed4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/tasks/test_shared.yml @@ -0,0 +1,341 @@ +# Create managed disks with missing required parameters +- name: Create managed disks with missing required parameters + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - resource_group: "{{ resource_group_secondary }}" + name: "disk-copy-without-source-uri" + create_option: copy + max_shares: 3 + - resource_group: "{{ resource_group_secondary }}" + name: "disk-import-without-storage-account" + create_option: import + max_shares: 3 + - resource_group: "{{ resource_group_secondary }}" + name: "disk-empty-without-disk-size" + create_option: empty + max_shares: 3 + register: result + ignore_errors: true + +- name: Validate that disk creation failed + assert: + that: + - result is failed + - 'result.msg == "Some required options are missing from managed disks configuration."' + - '"managed disk {{ resource_group_secondary }}/disk-copy-without-source-uri has create_option set to copy but not all required parameters (source_uri) are set." 
in result.errors' + - '"managed disk {{ resource_group_secondary }}/disk-import-without-storage-account has create_option set to import but not all required parameters (source_uri,storage_account_id) are set." in result.errors' + - '"managed disk {{ resource_group_secondary }}/disk-empty-without-disk-size has create_option set to empty but not all required parameters (disk_size_gb) are set." in result.errors' + +# Test managed disks creation +- name: Create managed disks + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ shared_disks }}" + register: azure_disks + +- name: Ensure result is changed + assert: + that: + - azure_disks is changed + - azure_disks.state | length == 2 + +- name: Ensure disks exist + azure_rm_manageddisk_info: + name: "{{ item.name }}" + resource_group: "{{ item.resource_group }}" + register: azure_disks_info + with_items: "{{ shared_disks }}" + +- name: Create disks again to validate idempotency + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ shared_disks }}" + register: create_idempotency + +- name: Ensure result is not changed + assert: + that: + - create_idempotency is not changed + +- name: Validate that disks have been created + assert: + that: + - item.name in disk_names + with_items: "{{ shared_disks }}" + vars: + disk_names: "{{ azure_disks_info.results | map(attribute='ansible_info') | list | map(attribute='azure_managed_disk') | flatten | map(attribute='name') | list }}" + +# setup: create virtual machines to attach disks on +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group_secondary }}" + name: "vnet-{{ item.name }}" + address_prefixes: "{{ shared_virtual_network_cidr }}" + with_items: "{{ shared_virtual_machines }}" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group_secondary }}" + name: "sub-{{ item.name }}" + address_prefix: "{{ shared_subnet_cidr }}" + virtual_network: "vnet-{{ item.name }}" + with_items: 
"{{ shared_virtual_machines }}" + +- name: Create virtual machines + azure_rm_virtualmachine: + resource_group: "{{ item.resource_group }}" + name: "{{ item.name }}" + vm_size: Standard_E2_v3 + virtual_network: "vnet-{{ item.name }}" + managed_disk_type: Standard_LRS + admin_username: adminuser + admin_password: "!test123@" + os_type: Linux + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + with_items: "{{ shared_virtual_machines }}" + register: create_vms + async: 1000 + poll: 0 + +- name: Wait for Virtual machine to be created + async_status: + jid: "{{ item.ansible_job_id }}" + register: wait_create_vm + until: wait_create_vm.finished + retries: 100 + delay: 5 + loop: "{{ create_vms.results }}" + +- name: Get virtual machine info + azure_rm_virtualmachine_info: + resource_group: "{{ item.resource_group }}" + name: "{{ item.name }}" + register: vm_result + with_items: "{{ shared_virtual_machines }}" + +- set_fact: + vm_ids: "{{ vm_result.results | map(attribute='vms') | flatten | map(attribute='id') | list }}" + +# Test attach disk on VMs +- name: Attach existing disks to VMs + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ shared_disks }}" + managed_by_extended: + - "{{ item }}" + register: attach_disks + with_items: "{{ shared_virtual_machines }}" + +- name: Ensure result is changed + assert: + that: + - attach_disks is changed + - attach_disks.results | length == 2 + +- name: Retrieve managed disks info + azure_rm_manageddisk_info: + resource_group: "{{ item.resource_group }}" + name: "{{ item.name }}" + register: azure_disks + with_items: "{{ shared_disks }}" + +- name: Validate disks are attached to the VMs + assert: + that: + - azure_managed_disks | length == 2 + - azure_managed_disks.0.managed_by_extended | length == 2 + - azure_managed_disks.1.managed_by_extended | length == 2 + - vm_ids[0] in azure_managed_disks.0.managed_by_extended + - vm_ids[1] in azure_managed_disks.0.managed_by_extended 
+ - vm_ids[0] in azure_managed_disks.1.managed_by_extended + - vm_ids[1] in azure_managed_disks.1.managed_by_extended + vars: + azure_managed_disks: "{{ azure_disks.results | map(attribute='ansible_info') | list | map(attribute='azure_managed_disk') | flatten | list }}" + +- name: Attach disks once again (idempotency) + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: "{{ shared_disks }}" + managed_by_extended: "{{ shared_virtual_machines }}" + register: attach_idempotency + +- name: Ensure result is not changed + assert: + that: + - attach_idempotency is not changed + +# Test: Detach managed disk from specific VM +- name: Detach disk item 1 from VM item 1 + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[0] }}" + managed_by_extended: + - "{{ shared_virtual_machines[0] }}" + state: absent + register: detach_disks + +- name: Ensure result is changed + assert: + that: + - detach_disks is changed + +- name: Retrieve managed disks info + azure_rm_manageddisk_info: + resource_group: "{{ item.resource_group }}" + name: "{{ item.name }}" + register: azure_disks + with_items: "{{ shared_disks }}" + +- name: Ensure disk 1 is attached to VM 1 only and disk 2 is attached to VM 1 and VM 2 + assert: + that: + - azure_managed_disks | length == 2 + - azure_managed_disks.0.managed_by_extended | length == 1 + - azure_managed_disks.1.managed_by_extended | length == 2 + - azure_managed_disks.0.managed_by == vm_ids[1] + - vm_ids[0] not in azure_managed_disks.0.managed_by_extended + - vm_ids[1] in azure_managed_disks.0.managed_by_extended + - vm_ids[0] in azure_managed_disks.1.managed_by_extended + - vm_ids[1] in azure_managed_disks.1.managed_by_extended + vars: + azure_managed_disks: "{{ azure_disks.results | map(attribute='ansible_info') | list | map(attribute='azure_managed_disk') | flatten | list }}" + +- name: Detach disks once again (idempotency) + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ 
shared_disks[0] }}" + managed_by_extended: + - "{{ shared_virtual_machines[0] }}" + state: absent + register: detach_idempotency + +- name: Ensure result is not changed + assert: + that: + - detach_idempotency is not changed + +# Test Detach disks from all VMs +- name: Detach disk 2 from all VMs + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[1] }}" + managed_by_extended: [] + state: present + register: detach_from_vms + +- name: Retrieve managed disks info + azure_rm_manageddisk_info: + resource_group: "{{ shared_disks[1].resource_group }}" + name: "{{ shared_disks[1].name }}" + register: azure_disks + +- name: Ensure disk is not attached to any VM + assert: + that: + - detach_from_vms is changed + - azure_disks.ansible_info.azure_managed_disk | length == 1 + - not azure_disks.ansible_info.azure_managed_disk.0.managed_by + - not azure_disks.ansible_info.azure_managed_disk.0.managed_by_extended + +- name: Detach disk 2 from all VMs once again (idempotency) + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[1] }}" + managed_by_extended: [] + state: present + register: detach_from_vms + +- name: Ensure nothing changed + assert: + that: + - detach_from_vms is not changed + +# Test delete disk attached to a VM +- name: Retrieve managed disks info + azure_rm_manageddisk_info: + resource_group: "{{ shared_disks[0].resource_group }}" + name: "{{ shared_disks[0].name }}" + register: azure_disks + +- name: Ensure disk is attached to at least one VM + assert: + that: + - azure_disks.ansible_info.azure_managed_disk.0.managed_by + - azure_disks.ansible_info.azure_managed_disk.0.managed_by_extended | length > 0 + +- name: Delete managed disk attached to VM + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[0] }}" + state: absent + register: delete_attached_disk + +- name: Retrieve managed disks info + azure_rm_manageddisk_info: + resource_group: "{{ 
shared_disks[0].resource_group }}" + name: "{{ shared_disks[0].name }}" + register: azure_disks + +- name: Ensure disk was deleted + assert: + that: + - delete_attached_disk is changed + - azure_disks.ansible_info.azure_managed_disk | length == 0 + +- name: Delete managed disk once again (idempotency) + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[0] }}" + state: absent + register: delete_attached_disk + +- name: Ensure delete idempotency + assert: + that: + - delete_attached_disk is not changed + +# Test managed disks deletion with managed_by_extended set to [] +- name: Trying to delete managed disks with managed_by_extended set to [] + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[1] }}" + managed_by_extended: [] + state: absent + register: delete_with_wrong_parameter + +- name: Ensure disks were not deleted + assert: + that: + - delete_with_wrong_parameter is not changed + +# Test managed disks deletion +- name: Delete managed disks + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[1] }}" + state: absent + register: delete_disk + +- name: Retrieve managed disks info + azure_rm_manageddisk_info: + resource_group: "{{ shared_disks[1].resource_group }}" + name: "{{ shared_disks[1].name }}" + register: azure_disks + +- name: Ensure disk was deleted + assert: + that: + - delete_disk is changed + - azure_disks.ansible_info.azure_managed_disk | length == 0 + +- name: Delete managed disks once again (idempotency) + azure.azcollection.azure_rm_multiplemanageddisks: + managed_disks: + - "{{ shared_disks[1] }}" + state: absent + register: delete_idempotency + +- name: Ensure nothing changed + assert: + that: + - delete_idempotency is not changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/templates/disk_config.j2 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/templates/disk_config.j2 new file mode 100644 index 000000000..212bb9c03 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_multiplemanageddisks/templates/disk_config.j2 @@ -0,0 +1,11 @@ +{% for i in range(virtual_machines|length) %} +- disks: +{% for d in range(async_number_disk_to_attach) %} + - disk_size_gb: 1 + name: "{{ virtual_machines[i].name }}-disk-{{ d }}" + resource_group: "{{ resource_group_secondary }}" +{% endfor %} + virtual_machine: + name: "{{ virtual_machines[i].name }}" + resource_group: "{{ resource_group_secondary }}" +{% endfor %} diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases new file mode 100644 index 000000000..21e7a127b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases @@ -0,0 +1,10 @@ +cloud/azure +destructive +shippable/azure/group13 +azure_rm_mysqlserver_facts +azure_rm_mysqldatabase +azure_rm_mysqldatabase_facts +azure_rm_mysqlfirewallrule +azure_rm_mysqlfirewallrule_facts +azure_rm_mysqlconfiguration +azure_rm_mysqlconfiguration_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/tasks/main.yml new file 
mode 100644 index 000000000..75475bc3b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/tasks/main.yml @@ -0,0 +1,675 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create instance of MySQL Server -- check mode + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_profile: + storage_mb: 51200 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + version: 5.7 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_profile: + storage_mb: 51200 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + version: 5.7 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + +- name: Create again instance of MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_profile: + storage_mb: 51200 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + version: 5.7 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! 
+ register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.state == 'Ready' + +- name: Update instance of MySQL Server, change storage size + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_profile: + storage_mb: 128000 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + version: 5.7 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + - output.state == 'Ready' +- debug: + var: output + +- name: Gather facts MySQL Server + azure_rm_mysqlserver_info: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + register: output +- name: Assert that storage size is correct + assert: + that: + - output.servers[0]['storage_profile']['storage_mb'] == 128000 + +- name: Restart MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + restarted: True + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Create second instance of MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_profile: + storage_mb: 51200 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + version: 5.7 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! 
+ tags: + aaa: bbb + +- name: Create second instance of MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_profile: + storage_mb: 51200 + backup_retention_days: 7 + geo_redundant_backup: Disabled + storage_autogrow: Disabled + version: 5.7 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + tags: + ccc: ddd + +- name: Gather facts MySQL Server + azure_rm_mysqlserver_info: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }}second + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[0]['tags']['aaa'] == 'bbb' + - output.servers[0]['tags']['ccc'] == 'ddd' + +- name: Gather facts MySQL Server + azure_rm_mysqlserver_info: + resource_group: "{{ resource_group }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[1]['id'] != None + - output.servers[1]['name'] != None + - output.servers[1]['location'] != None + - output.servers[1]['sku']['name'] != None + - 
output.servers[1]['sku']['tier'] != None + - output.servers[1]['sku']['capacity'] != None + - output.servers[1]['version'] != None + - output.servers[1]['user_visible_state'] != None + - output.servers[1]['fully_qualified_domain_name'] != None + +# +# azure_rm_mysqldatabase tests below +# +- name: Create instance of MySQL Database -- check mode + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of MySQL Database + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + collation: latin1_swedish_ci + charset: latin1 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create again instance of MySQL Database + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + collation: latin1_swedish_ci + charset: latin1 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.name == 'testdatabase' + +- name: Try to update database without force_update + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + collation: utf8_general_ci + charset: utf8 + ignore_errors: yes + register: output +- name: Assert that nothing has changed + assert: + that: + - output.changed == False + +- name: Update instance of database using force_update + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + collation: utf8_general_ci + charset: utf8 + force_update: yes + register: output +- name: Assert the state has changed + assert: + that: + - 
output.changed + - output.name == 'testdatabase' + +- name: Create second instance of MySQL Database + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase2 + +- name: Gather facts MySQL Database + azure_rm_mysqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + +- name: Gather facts MySQL Database + azure_rm_mysqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + - output.databases[1]['server_name'] != None + - output.databases[1]['name'] != None + - output.databases[1]['charset'] != None + - output.databases[1]['collation'] != None + +- name: Delete instance of MySQL Database -- check mode + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of MySQL Database + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of MySQL Database + azure_rm_mysqldatabase: + resource_group: "{{ resource_group }}" + server_name: 
mysqlsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +# +# azure_rm_firewallrule tests below +# +- name: Create instance of Firewall Rule -- check mode + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create again instance of Firewall Rule + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule -- check mode + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule -- second + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: 
Gather facts MySQL Firewall Rule + azure_rm_mysqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - "output.rules | length == 1" + +- name: Gather facts MySQL Firewall Rule + azure_rm_mysqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - output.rules[1].id != None + - output.rules[1].name != None + - output.rules[1].start_ip_address != None + - output.rules[1].end_ip_address != None + - "output.rules | length == 2" + +- name: Delete instance of Firewall Rule + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Firewall Rule + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule - second + azure_rm_mysqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + state: absent + +- name: Gather facts MySQL Firewall Rule + 
azure_rm_mysqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that empty list was returned + assert: + that: + - output.changed == False + - "output.rules | length == 0" + +# +# configuration +# +- name: Create instance of Configuration -- check mode + azure_rm_mysqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + value: "ON" + check_mode: yes + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Gather facts of default configuration + azure_rm_mysqlconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + register: output +- name: Get facts of event_scheduler + debug: + var: output + +- name: Try to delete default configuration + azure_rm_mysqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Try to change default configuration + azure_rm_mysqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + value: "ON" + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to change default configuration -- idempotent + azure_rm_mysqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + value: "ON" + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Try to reset configuration + azure_rm_mysqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert 
that change was registered + assert: + that: + - output.changed + +- name: Try to reset configuration -- idempotent + azure_rm_mysqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Gather facts MySQL Configuration + azure_rm_mysqlconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + name: event_scheduler + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length == 1 + +- name: Gather facts MySQL Configuration + azure_rm_mysqlconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: mysqlsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length > 1 + +# +# clean up azure_rm_mysqlserver test +# + +- name: Delete instance of MySQL Server -- check mode + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of MySQL Server + azure_rm_mysqlserver: + 
resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete second instance of MySQL Server + azure_rm_mysqlserver: + resource_group: "{{ resource_group }}" + name: mysqlsrv{{ rpfx }}second + state: absent + async: 400 + poll: 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/aliases new file mode 100644 index 000000000..77e564784 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/aliases @@ -0,0 +1,4 @@ +cloud/azure +destructive +shippable/azure/group6 +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/tasks/main.yml new file mode 100644 index 000000000..b411d2422 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_natgateway/tasks/main.yml @@ -0,0 +1,346 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: true +- name: Set json query to only retrieve gateways from this test + set_fact: + query: "[?ends_with(name, `{{ rpfx }}`)]" + run_once: true + +- name: Get resource group info + azure_rm_resourcegroup_info: + name: 
"{{ resource_group }}" + register: rg_output +- name: Store rg location + set_fact: + rg_location: "{{ rg_output.resourcegroups[0].location }}" + run_once: true + +- name: Get NAT gateways + azure_rm_natgateway_info: + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert there are no gateways + assert: + that: + - natgw_output.gateways | community.general.json_query(query) | length == 0 + +- name: Create instance of NAT Gateway in check_mode + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + register: output + check_mode: yes +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Get NAT gateways + azure_rm_natgateway_info: + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert there are still no gateways + assert: + that: + - natgw_output.gateways | community.general.json_query(query) | length == 0 + +- name: Create instance of NAT Gateway + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + register: natgw_output +- name: Assert that gateway is well created + assert: + that: + - natgw_output.changed + - natgw_output.resource_group == "{{ resource_group }}" + - natgw_output.name == "nat-gateway{{ rpfx }}1" + - natgw_output.location == "{{ rg_location }}" +- name: Get NAT gateways + azure_rm_natgateway_info: + name: nat-gateway{{ rpfx }}1 + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert that gateway has the right default parameters + assert: + that: + - natgw_output.gateways[0].idle_timeout_in_minutes == 4 + +- name: Try to update instance of NAT Gateway - no change + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Try to update instance of NAT Gateway - change timeout + 
azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + idle_timeout_in_minutes: 10 + register: natgw_output +- name: Assert the resource instance is changed + assert: + that: + - natgw_output.changed + - natgw_output.location == "{{ rg_location }}" +- name: Get NAT gateways + azure_rm_natgateway_info: + name: nat-gateway{{ rpfx }}1 + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert that gateway has the right parameters + assert: + that: + - natgw_output.gateways[0].idle_timeout_in_minutes == 10 + +- name: Try to update instance of NAT Gateway - no change as this is the default value + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + sku: + name: standard + idle_timeout_in_minutes: 10 + register: output +- name: Assert the resource instance is not changed + assert: + that: + - not output.changed + +- name: Create public IP for NAT Gateway + azure_rm_publicipaddress: + name: "nat-gateway{{ rpfx }}-pip1" + resource_group: "{{ resource_group }}" + sku: "standard" + allocation_method: "static" +- name: Get public IP for NAT Gateway + azure_rm_publicipaddress_info: + name: "nat-gateway{{ rpfx }}-pip1" + resource_group: "{{ resource_group }}" + register: pip_info_output + +- name: Create second public IP for NAT Gateway + azure_rm_publicipaddress: + name: "nat-gateway{{ rpfx }}-pip2" + resource_group: "{{ resource_group }}" + sku: "standard" + allocation_method: "static" + +- name: Try to update instance of NAT Gateway - add public IPs + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + idle_timeout_in_minutes: 10 + sku: + name: standard + public_ip_addresses: + - "{{ pip_info_output.publicipaddresses[0].id }}" + - "nat-gateway{{ rpfx }}-pip2" + register: natgw_output +- name: Assert the resource instance is changed + assert: + that: + - natgw_output.changed +- name: Get NAT gateways + azure_rm_natgateway_info: + 
name: nat-gateway{{ rpfx }}1 + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert that gateway has the right parameters + assert: + that: + - natgw_output.gateways[0].public_ip_addresses | length == 2 + +- name: Try to update instance of NAT Gateway - remove 1 public IPs + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + idle_timeout_in_minutes: 10 + sku: + name: standard + public_ip_addresses: + - "nat-gateway{{ rpfx }}-pip2" + register: natgw_output +- name: Assert the resource instance is changed + assert: + that: + - natgw_output.changed +- name: Get NAT gateways + azure_rm_natgateway_info: + name: nat-gateway{{ rpfx }}1 + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert that gateway has the right parameters + assert: + that: + - natgw_output.gateways[0].public_ip_addresses | length == 1 + +# Note: disassociating first IP address helps avoiding issues when deleting them +- name: Try to update instance of NAT Gateway - remove last public IPs + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}1 + idle_timeout_in_minutes: 10 + sku: + name: standard + register: natgw_output +- name: Assert the resource instance is changed + assert: + that: + - natgw_output.changed +- name: Get NAT gateways + azure_rm_natgateway_info: + name: nat-gateway{{ rpfx }}1 + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert that gateway has the right parameters + assert: + that: + - natgw_output.gateways[0].public_ip_addresses == None + +- name: Create virtual network + azure_rm_virtualnetwork: + name: "vnet{{ rpfx }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + - fdda:e69b:1587:495e::/64 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + +- name: Create the subnet + azure_rm_subnet: + name: "subnet{{ rpfx }}" + 
virtual_network_name: "vnet{{ rpfx }}" + address_prefix_cidr: "10.1.0.0/24" + nat_gateway: "{{ natgw_output.gateways[0].id }}" + resource_group: "{{ resource_group }}" +- name: Get the subnet facts + azure_rm_subnet_info: + name: "subnet{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert the NAT Gateway is associated + assert: + that: + - output.subnets[0].nat_gateway == "{{ natgw_output.gateways[0].id }}" + +- name: Create instance of NAT Gateway + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}3 + register: natgw_output3 +- name: Update the subnet - Update associated NAT Gateway + azure_rm_subnet: + name: "subnet{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + address_prefix_cidr: "10.1.0.0/24" + resource_group: "{{ resource_group }}" + nat_gateway: nat-gateway{{ rpfx }}3 +- name: Get the subnet facts + azure_rm_subnet_info: + name: "subnet{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert the new NAT Gateway is associated + assert: + that: + - output.subnets[0].nat_gateway == "{{ natgw_output3.id }}" + +- name: Update the subnet - Disassociate NAT Gateway + azure_rm_subnet: + name: "subnet{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + address_prefix_cidr: "10.1.0.0/24" + resource_group: "{{ resource_group }}" +- name: Get the subnet facts + azure_rm_subnet_info: + name: "subnet{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert the NAT Gateway is not associated + assert: + that: + - output.subnets[0].nat_gateway == None + +# A resource with the same name cannot be created in another location +- name: Create a second instance of NAT Gateway - change location and name + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}2 + 
idle_timeout_in_minutes: 10 + location: eastus + zones: [ 1 ] + sku: + name: standard + register: natgw_output +- name: Assert the resource instance is changed + assert: + that: + - natgw_output.changed + - natgw_output.location == "eastus" +- name: Get NAT gateways + azure_rm_natgateway_info: + name: nat-gateway{{ rpfx }}2 + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert that gateway has the right parameters + assert: + that: + - natgw_output.gateways[0].idle_timeout_in_minutes == 10 + - natgw_output.gateways[0].zones[0] == "1" + +#### Final cleanup +- name: Delete instance of NAT Gateway + azure_rm_natgateway: + resource_group: "{{ resource_group }}" + name: nat-gateway{{ rpfx }}{{ item }} + state: absent + register: output + with_items: + - 1 + - 2 + - 3 + +- name: Remove subnet + azure_rm_subnet: + state: absent + name: subnet{{ rpfx }} + virtual_network_name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + +- name: Remove virtual network + azure_rm_virtualnetwork: + name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Get NAT gateways + azure_rm_natgateway_info: + resource_group: "{{ resource_group }}" + register: natgw_output +- name: Assert there are no gateways left + assert: + that: + - natgw_output.gateways | community.general.json_query(query) | length == 0 + +- name: Delete public IPs for NAT Gateway + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "nat-gateway{{ rpfx }}-pip{{ item }}" + state: absent + with_items: + - 1 + - 2 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/aliases new file mode 100644 index 000000000..88fb70609 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/aliases @@ -0,0 +1,4 @@ +cloud/azure 
+shippable/azure/group5 +destructive +azure_rm_applicationsecuritygroup diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml new file mode 100644 index 000000000..7e38fc7ee --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml @@ -0,0 +1,765 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + applicationsecuritygroup_name1: "asg{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}" + applicationsecuritygroup_name2: "asg{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}" + nic_name1: "nic1{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group_secondary }}" + name: "tn{{ rpfx }}" + address_prefixes: ["10.10.0.0/16", "fdae:f296:2787::/48"] + register: vn + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group_secondary }}" + name: "tn{{ rpfx }}" + address_prefixes_cidr: ["10.10.0.0/24", "fdae:f296:2787::/64"] + virtual_network: "tn{{ rpfx }}" + +- name: Create public IP addresses + azure_rm_publicipaddress: + name: '{{ item.name }}' + resource_group: '{{ resource_group }}' + sku: 'standard' + 
allocation_method: 'static' + version: '{{ item.version }}' + loop: + - name: 'pip{{ rpfx }}' + version: 'ipv4' + - name: 'tn{{ rpfx }}' + version: 'ipv4' + - name: 'pip{{ rpfx }}v6' + version: 'ipv6' + - name: 'tn{{ rpfx }}v6' + version: 'ipv6' + +- name: create load balancer with multiple parameters + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "lb{{ rpfx }}" + sku: 'Standard' + frontend_ip_configurations: + - name: frontendipconf0 + public_ip_address: "pip{{ rpfx }}" + - name: frontendipconf1 + public_ip_address: "pip{{ rpfx }}v6" + backend_address_pools: + - name: backendaddrpool0 + - name: backendaddrpool1 + - name: backendaddrpool2 + probes: + - name: prob0 + port: 80 + inbound_nat_pools: + - name: inboundnatpool0 + frontend_ip_configuration_name: frontendipconf0 + protocol: Tcp + frontend_port_range_start: 80 + frontend_port_range_end: 81 + backend_port: 8080 + - name: inboundnatpool1 + frontend_ip_configuration_name: frontendipconf1 + protocol: Tcp + frontend_port_range_start: 80 + frontend_port_range_end: 81 + backend_port: 8080 + load_balancing_rules: + - name: lbrbalancingrule0 + frontend_ip_configuration: frontendipconf0 + backend_address_pool: backendaddrpool0 + frontend_port: 80 + backend_port: 80 + probe: prob0 + - name: lbrbalancingrule1 + frontend_ip_configuration: frontendipconf1 + backend_address_pool: backendaddrpool2 + frontend_port: 80 + backend_port: 80 + probe: prob0 + register: lb + +- name: Create most simple NIC with virtual_network id (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + public_ip: False + create_with_security_group: False + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Create most simple NIC with virtual_network resource_group + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: 
+ name: "tn{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + subnet: "tn{{ rpfx }}" + public_ip: False + create_with_security_group: False + register: output + +- assert: + that: + - output.changed + - output.state.id + - output.state.ip_configuration.primary + +- name: Get fact of the new created NIC + azure_rm_networkinterface_info: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + register: facts + +- assert: + that: + - "facts.networkinterfaces | length == 1" + - facts.networkinterfaces[0].id == output.state.id + - "facts.networkinterfaces[0].ip_configurations | length == 1" + - facts.networkinterfaces[0].ip_configurations[0].primary == True + - not facts.networkinterfaces[0].security_group + - not facts.networkinterfaces[0].ip_configurations[0].public_ip_address + - not facts.networkinterfaces[0].enable_ip_forwarding + - not facts.networkinterfaces[0].enable_accelerated_networking + +- name: Create most simple NIC with ip configurations (idempotent) + azure_rm_networkinterface: + resource_group: "{{ facts.networkinterfaces[0].resource_group }}" + name: "{{ facts.networkinterfaces[0].name }}" + virtual_network: "{{ facts.networkinterfaces[0].virtual_network }}" + create_with_security_group: False + ip_configurations: + - name: "{{ facts.networkinterfaces[0].ip_configurations[0].name }}" + private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}" + private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}" + primary: "{{ facts.networkinterfaces[0].ip_configurations[0].primary }}" + subnet: "{{ facts.networkinterfaces[0].subnet }}" + register: output + +- assert: + that: + - not output.changed + +- name: Create most simple NIC (idempotent) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + create_with_security_group: False + public_ip: 
False + register: output + +- assert: + that: + - not output.changed + +- name: Update security group (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + public_ip: False + security_group: "tn{{ rpfx }}sg" + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Update public ip address (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + public_ip_address_name: "tn{{ rpfx }}" + create_with_security_group: False + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Update accelerated networking (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + enable_accelerated_networking: True + create_with_security_group: False + public_ip: False + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Update IP forwarding networking (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + create_with_security_group: False + enable_ip_forwarding: True + public_ip: False + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Update dns server (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + create_with_security_group: False + public_ip: False + dns_servers: + - 8.9.10.11 + - 7.8.9.10 + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Update NIC + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ 
rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + enable_accelerated_networking: True + enable_ip_forwarding: True + security_group: "tn{{ rpfx }}sg" + dns_servers: + - 8.9.10.11 + - 7.8.9.10 + ip_configurations: + - name: "{{ facts.networkinterfaces[0].ip_configurations[0].name }}" + private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}" + private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}" + primary: "{{ facts.networkinterfaces[0].ip_configurations[0].primary }}" + - name: ipconfig1 + public_ip_name: "tn{{ rpfx }}" + load_balancer_backend_address_pools: + - "{{ lb.state.backend_address_pools[0].id }}" + - name: backendaddrpool1 + load_balancer: "lb{{ rpfx }}" + - name: ipconfig2 + public_ip_name: "tn{{ rpfx }}v6" + private_ip_address_version: 'IPv6' + load_balancer_backend_address_pools: + - "{{ lb.state.backend_address_pools[2].id }}" + - name: backendaddrpool2 + load_balancer: "lb{{ rpfx }}" + register: output + +- assert: + that: + - output.changed + - output.state.dns_settings.dns_servers == ['8.9.10.11', '7.8.9.10'] + - output.state.enable_ip_forwarding + - output.state.network_security_group.name == "tn{{ rpfx }}sg" + - output.state.enable_accelerated_networking + +- name: Complicated NIC (idempotent) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + enable_accelerated_networking: True + security_group: "tn{{ rpfx }}sg" + enable_ip_forwarding: True + dns_servers: + - 8.9.10.11 + - 7.8.9.10 + ip_configurations: + - name: "{{ facts.networkinterfaces[0].ip_configurations[0].name }}" + private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}" + private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}" + primary: "{{ 
facts.networkinterfaces[0].ip_configurations[0].primary }}" + - name: ipconfig1 + public_ip_name: "tn{{ rpfx }}" + load_balancer_backend_address_pools: + - "{{ lb.state.backend_address_pools[0].id }}" + - name: backendaddrpool1 + load_balancer: "lb{{ rpfx }}" + - name: ipconfig2 + public_ip_name: "tn{{ rpfx }}v6" + private_ip_address_version: 'IPv6' + load_balancer_backend_address_pools: + - "{{ lb.state.backend_address_pools[2].id }}" + - name: backendaddrpool2 + load_balancer: "lb{{ rpfx }}" + register: output + +- assert: + that: + - not output.changed + +- name: Get fact of the new created NIC + azure_rm_networkinterface_info: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + register: facts + +- assert: + that: + - "facts.networkinterfaces | length == 1" + - facts.networkinterfaces[0].id == output.state.id + - "facts.networkinterfaces[0].ip_configurations | length == 3" + - 'facts.networkinterfaces[0].security_group.endswith("tn{{ rpfx }}sg")' + - facts.networkinterfaces[0].enable_accelerated_networking + - facts.networkinterfaces[0].enable_ip_forwarding + - facts.networkinterfaces[0].ip_configurations[0].primary == True + - facts.networkinterfaces[0].ip_configurations[1].primary == False + +- name: Remove one dns server and ip configuration + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + security_group: "tn{{ rpfx }}sg" + enable_accelerated_networking: True + enable_ip_forwarding: True + dns_servers: + - 8.9.10.11 + ip_configurations: + - name: ipconfig1 + public_ip_name: "tn{{ rpfx }}" + primary: True + load_balancer_backend_address_pools: + - "{{ lb.state.backend_address_pools[0].id }}" + - name: backendaddrpool1 + load_balancer: "lb{{ rpfx }}" + register: output + +- assert: + that: + - output.changed + - output.state.dns_settings.dns_servers == ['8.9.10.11'] + - output.state.enable_ip_forwarding + - 
output.state.network_security_group.name == "tn{{ rpfx }}sg" + - "output.state.ip_configurations | length == 1" + - output.state.ip_configurations[0].public_ip_address.name == "tn{{ rpfx }}" + - output.state.enable_accelerated_networking + +- name: Create application security group(check mode) + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + tags: + testing: testing + check_mode: yes + register: output + +- name: Assert check mode creation + assert: + that: + - output.changed + +- name: Create Application security group + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + tags: + testing: testing + register: output + +- name: Assert application security group creation + assert: + that: + - output.changed + - output.id != '' + +- name: Get Application security group + azure_rm_applicationsecuritygroup_info: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + register: facts + +- name: Assert facts + assert: + that: + - facts['applicationsecuritygroups'] | length == 1 + - facts['applicationsecuritygroups'][0]['name'] != None + - facts['applicationsecuritygroups'][0]['location'] != None + - facts['applicationsecuritygroups'][0]['provisioning_state'] != None + +- name: Create application security group (idempotent) + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + tags: + testing: testing + register: output + +- name: Assert idempotent + assert: + that: + - not output.changed + +- name: Update application security group + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + tags: + testing: testing + foo: bar + register: output + +- name: Assert update + assert: + that: + - output.changed + +- name: Create Application security group in 
secondary resource group + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group_secondary }}" + name: "{{ applicationsecuritygroup_name2 }}" + register: asg + +- name: Create Nic with application security groups + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ nic_name1 }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + create_with_security_group: True + public_ip: False + ip_configurations: + - name: ipconfig1 + application_security_groups: + - "{{ applicationsecuritygroup_name1 }}" + - "{{ asg.id }}" + primary: True + register: output + +- name: assert creation succeeded + assert: + that: + - output.changed + +- name: Create Nic with application security groups (idempotent) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ nic_name1 }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + create_with_security_group: True + public_ip: False + ip_configurations: + - name: ipconfig1 + application_security_groups: + - "{{ asg.id }}" + - "{{ applicationsecuritygroup_name1 }}" + primary: True + register: output + +- name: assert idempotent + assert: + that: + - not output.changed + +- name: Update Nic with application security groups + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ nic_name1 }}" + virtual_network: "{{ vn.state.id }}" + subnet: "tn{{ rpfx }}" + create_with_security_group: True + public_ip: False + ip_configurations: + - name: ipconfig1 + application_security_groups: + - "{{ applicationsecuritygroup_name1 }}" + primary: True + register: output + +- name: assert update succeeded + assert: + that: + - output.changed + +- name: Get fact of the new created NIC + azure_rm_networkinterface_info: + resource_group: "{{ resource_group }}" + name: "{{ nic_name1 }}" + register: facts + +- assert: + that: + - "facts.networkinterfaces[0].ip_configurations[0].application_security_groups | length == 1" + +- name: Create 
subnet for appgw + azure_rm_subnet: + name: appgw-subnet{{ rpfx }} + virtual_network_name: tn{{ rpfx }} + resource_group: "{{ resource_group_secondary }}" + address_prefix_cidr: 10.10.1.0/24 + register: appgw_subnet_output + +- name: Create application gateway to connect NIC to + azure_rm_appgateway: + resource_group: "{{ resource_group_secondary }}" + name: "appgateway{{ rpfx }}" + sku: + name: standard_small + tier: standard + capacity: 2 + gateway_ip_configurations: + - subnet: + id: "{{ appgw_subnet_output.state.id }}" + name: app_gateway_ip_config + frontend_ip_configurations: + - subnet: + id: "{{ appgw_subnet_output.state.id }}" + name: sample_gateway_frontend_ip_config + frontend_ports: + - port: 80 + name: http_frontend_port + backend_address_pools: + - name: test_backend_address_pool # empty pool which will receive attachment to NIC. + backend_http_settings_collection: + - port: 80 + protocol: http + cookie_based_affinity: enabled + name: sample_appgateway_http_settings + http_listeners: + - frontend_ip_configuration: sample_gateway_frontend_ip_config + frontend_port: http_frontend_port + protocol: http + name: http_listener + request_routing_rules: + - rule_type: Basic + backend_address_pool: test_backend_address_pool + backend_http_settings: sample_appgateway_http_settings + http_listener: http_listener + name: rule1 + register: appgw_output + +- name: Create subnet for appgw connected NIC + azure_rm_subnet: + name: nic-appgw-subnet{{ rpfx }} + virtual_network_name: tn{{ rpfx }} + resource_group: "{{ resource_group_secondary }}" + address_prefix_cidr: 10.10.2.0/24 + +- name: "Create NIC attached to application gateway" + azure_rm_networkinterface: + name: "appgw-nic{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + virtual_network: "tn{{ rpfx }}" + subnet_name: "nic-appgw-subnet{{ rpfx }}" + create_with_security_group: false + public_ip: false + ip_configurations: + - name: "default" + primary: true + 
application_gateway_backend_address_pools: + - name: "test_backend_address_pool" + application_gateway: "appgateway{{ rpfx }}" + register: output +- assert: + that: + - output.changed + - output.state.ip_configurations | length == 1 + - output.state.ip_configurations[0].application_gateway_backend_address_pools | length == 1 + - output.state.ip_configurations[0].application_gateway_backend_address_pools[0] == appgw_output.id + '/backendAddressPools/test_backend_address_pool' + +- name: "Create NIC attached to application gateway - idempotent" + azure_rm_networkinterface: + name: "appgw-nic{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + virtual_network: "tn{{ rpfx }}" + subnet_name: "nic-appgw-subnet{{ rpfx }}" + create_with_security_group: false + public_ip: false + ip_configurations: + - name: "default" + primary: true + application_gateway_backend_address_pools: + - name: "test_backend_address_pool" + application_gateway: "appgateway{{ rpfx }}" + register: output +- assert: + that: + - not output.changed + +- name: Get facts for appgw nic + azure_rm_networkinterface_info: + name: "appgw-nic{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + register: facts +- assert: + that: + - facts.networkinterfaces | length == 1 + - facts.networkinterfaces[0].id == output.state.id + - facts.networkinterfaces[0].ip_configurations | length == 1 + - facts.networkinterfaces[0].ip_configurations[0].application_gateway_backend_address_pools | length == 1 + - facts.networkinterfaces[0].ip_configurations[0].application_gateway_backend_address_pools[0] == appgw_output.id + '/backendAddressPools/test_backend_address_pool' + +- name: "Delete NIC attached to application gateway" + azure_rm_networkinterface: + name: "appgw-nic{{ rpfx }}" + resource_group: "{{ resource_group_secondary }}" + state: absent + register: output +- assert: + that: + - output.changed + +- name: Delete application gateway + azure_rm_appgateway: + name: "appgateway{{ rpfx }}" + 
resource_group: "{{ resource_group_secondary }}" + state: absent + register: output +- assert: + that: + - output.changed + +- name: Delete the NIC (check mode) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + state: absent + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Delete the NIC + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + state: absent + register: output + +- assert: + that: + - output.changed + +- name: Delete the NIC (idempotent) + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "tn{{ rpfx }}" + state: absent + register: output + +- assert: + that: + - not output.changed + +- name: delete load balancer + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "lb{{ rpfx }}" + state: absent + +- name: delete public ip addresses + azure_rm_publicipaddress: + name: "{{ item }}" + resource_group: '{{ resource_group }}' + state: absent + register: output + loop: + - 'pip{{ rpfx }}' + - 'pip{{ rpfx }}v6' + - 'tn{{ rpfx }}' + - 'tn{{ rpfx }}v6' + +- assert: + that: + - output.changed + - output.results | length == 4 + +- name: Delete the NIC + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ nic_name1 }}" + state: absent + +- name: Delete the application security group (check mode) + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + state: absent + check_mode: yes + register: output + +- name: Assert delete check mode + assert: + that: + - output.changed + +- name: Delete the application security group + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ applicationsecuritygroup_name1 }}" + state: absent + register: output + +- name: Assert the deletion + assert: + that: + - output.changed + +- name: Delete second application security group + 
azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group_secondary }}" + name: "{{ applicationsecuritygroup_name2 }}" + state: absent + register: output + +- name: Delete network security groups + azure_rm_securitygroup: + resource_group: '{{ resource_group }}' + name: '{{ item }}' + state: 'absent' + register: output + loop: + - '{{ nic_name1 }}' + - 'tn{{ rpfx }}sg' + +- assert: + that: + - output.changed + - output.results | length == 2 + +- name: Delete virtual network + azure_rm_virtualnetwork: + resource_group: '{{ resource_group_secondary }}' + name: 'tn{{ rpfx }}' + state: 'absent' + register: output + +- assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/aliases new file mode 100644 index 000000000..5cf25760d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/meta/main.yml new file mode 100644 index 000000000..50fd77acf --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/tasks/main.yml new file mode 100644 index 000000000..ed79aff83 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_notificationhub/tasks/main.yml @@ -0,0 +1,153 
@@ +- name: Create random notification hub and namespace + set_fact: + namespace_name: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + name: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create Notification Hub Namespace (check mode) + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + sku: "free" + check_mode: yes + register: results + +- assert: + that: results.changed + +- name: Create Notification Hub (check mode) + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + sku: "free" + check_mode: yes + register: results + +- assert: + that: results.changed + +- name: Create Namespace Hub + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + sku: "free" + register: results + +- assert: + that: results.changed + +- name: Create Notification Hub + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + sku: "free" + register: results + +- assert: + that: results.changed + +- name: Update Namespace + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + tags: + test: modified + register: results + +- assert: + that: + - results.changed + - results.state.tags.test == 'modified' + + +- name: Update Notification Hub + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + tags: + test: modified + register: results + +- assert: + that: + - results.changed + - results.state.tags.test == 'modified' + +- name: Retrieve Namespace + 
azure_rm_notificationhub_info: + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + register: results + +- name: Assert that facts module returned result + assert: + that: + - results.namespace[0].tags.test == 'modified' + +- name: Test idempotent + azure_rm_notificationhub: + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: + - not results.changed + +# +# azure_rm_notificationhub: hub and namespace cleanup +# + +- name: Delete Namespace + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Pause for 3 minutes to make sure the deletion completed successfully + pause: + minutes: 3 + +- name: Delete Namespace (idempotent) + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + resource_group: "{{ resource_group }}" + state: absent + register: results + +- assert: + that: not results.changed + +- name: Delete Notification Hub + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Pause for 3 minutes to make sure the deletion completed successfully + pause: + minutes: 3 + +- name: Delete Notification Hub (idempotent) + azure_rm_notificationhub: + location: eastus2 + namespace_name: "{{ namespace_name }}" + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + register: results + +- assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/aliases new file mode 100644 index 000000000..9eb408856 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/aliases @@ -0,0 
+1,4 @@ +cloud/azure +shippable/azure/group5 +destructive +disabled diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/tasks/main.yml new file mode 100644 index 000000000..a173bf929 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_openshiftmanagedcluster/tasks/main.yml @@ -0,0 +1,118 @@ +- set_fact: + cluster_name: "{{ resource_group | hash('md5') | truncate(8, True, '') }}" + +- name: get resource group info + azure_rm_resourcegroup_info: + name: "{{ resource_group_secondary }}" + register: rg_output + +- name: Get available clusters for a specific resource_group + azure_rm_openshiftmanagedcluster_info: + resource_group: "{{ resource_group_secondary }}" + register: output + +- assert: + that: output['clusters'] | length == 0 + +- name: Get specific cluster information for a specific cluster + azure_rm_openshiftmanagedcluster_info: + resource_group: "{{ resource_group_secondary }}" + name: "{{ cluster_name }}" + register: output + +- assert: + that: output['clusters'].keys() | length == 0 + +- name: Get all clusters + azure_rm_openshiftmanagedcluster_info: + register: output + +- assert: + that: output['clusters']['value'] | length == 0 + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group_secondary }}" + name: testVnet + address_prefixes_cidr: + - 
10.151.0.0/16 + - 10.152.0.0/16 + +- name: create master subnet + azure_rm_subnet: + name: master_foobar + virtual_network_name: testVnet + resource_group: "{{ resource_group_secondary }}" + address_prefix_cidr: 10.151.0.0/24 + register: master_sub_output + +- name: create work subnet + azure_rm_subnet: + name: worker_foobar + virtual_network_name: testVnet + resource_group: "{{ resource_group_secondary }}" + address_prefix_cidr: 10.152.0.0/24 + register: worker_sub_output + +- name: Create openshift cluster + azure_rm_openshiftmanagedcluster: + resource_group: "{{ resource_group }}" + name: "{{ cluster_name }}" + location: "eastus" + cluster_profile: + cluster_resource_group_id: "{{ rg_output.resourcegroups[0].id }}" + domain: "{{ cluster_name }}" + service_principal_profile: + client_id: "{{ azure_client_id }}" + client_secret: "{{ azure_secret }}" + network_profile: + pod_cidr: "10.128.0.0/14" + service_cidr: "172.30.0.0/16" + worker_profiles: + - vm_size: "Standard_D4s_v3" + subnet_id: "{{ worker_sub_output.state.id }}" + disk_size: 128 + count: 3 + master_profile: + vm_size: "Standard_D8s_v3" + subnet_id: "{{ master_sub_output.state.id }}" + register: output + +- assert: + that: output.changed + + +- name: Get available clusters for a specific resource_group + azure_rm_openshiftmanagedcluster_info: + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: output['clusters'] | length == 1 + +- name: Get specific cluster information for a specific cluster + azure_rm_openshiftmanagedcluster_info: + resource_group: "{{ resource_group }}" + name: "{{ cluster_name }}" + register: output + +- assert: + that: output['clusters']['name'] == "{{ cluster_name }}" + +- name: Get all clusters + azure_rm_openshiftmanagedcluster_info: + register: output + +- assert: + that: output['clusters'] | length >= 1 + +- name: Delete openshift cluster + azure_rm_openshiftmanagedcluster: + resource_group: "{{ resource_group }}" + name: "{{ cluster_name }}" + 
location: "eastus" + state: absent + register: output + +- assert: + that: output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/aliases new file mode 100644 index 000000000..f982afc3c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/aliases @@ -0,0 +1,11 @@ +cloud/azure +destructive +unsupported +shippable/azure/group11 +azure_rm_postgresqlserver_facts +azure_rm_postgresqldatabase +azure_rm_postgresqldatabase_facts +azure_rm_postgresqlfirewallrule +azure_rm_postgresqlfirewallrule_facts +azure_rm_postgresqlserverconfiguration +azure_rm_postgresqlserverconfiguration_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/tasks/main.yml new file mode 100644 index 000000000..b4022b27b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlserver/tasks/main.yml @@ -0,0 +1,615 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create instance of PostgreSQL Server -- check mode + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + sku: + name: 
B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + +- name: Create again instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.state == 'Ready' + +- name: Update instance of PostgreSQL Server, change storage size + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 128000 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! 
+ register: output +- name: Assert the state has not changed + assert: + that: + - output.changed + - output.state == 'Ready' +- debug: + var: output + +- name: Gather facts postgresql Server + azure_rm_postgresqlserver_info: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + register: output +- name: Assert that storage size is correct + assert: + that: + - output.servers[0]['storage_mb'] == 128000 + +- name: Create second instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + enforce_ssl: True + storage_autogrow: True + backup_retention_days: 7 + admin_username: zimxyz + admin_password: Password123! + tags: + aaa: bbb + +- name: Create second instance of PostgreSQL Server -- add tags + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }}second + sku: + name: B_Gen5_1 + tier: Basic + location: westus2 + storage_mb: 51200 + enforce_ssl: True + storage_autogrow: True + backup_retention_days: 7 + admin_username: zimxyz + admin_password: Password123! 
+ tags: + ccc: ddd + +- name: Gather facts PostgreSQL Server + azure_rm_postgresqlserver_info: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }}second + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[0]['tags']['aaa'] == 'bbb' + - output.servers[0]['tags']['ccc'] == 'ddd' + - output.servers[0]['backup_retention_days'] == 7 + +- name: Gather facts PostgreSQL Server + azure_rm_postgresqlserver_info: + resource_group: "{{ resource_group }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers[0]['id'] != None + - output.servers[0]['name'] != None + - output.servers[0]['location'] != None + - output.servers[0]['sku']['name'] != None + - output.servers[0]['sku']['tier'] != None + - output.servers[0]['sku']['capacity'] != None + - output.servers[0]['version'] != None + - output.servers[0]['user_visible_state'] != None + - output.servers[0]['fully_qualified_domain_name'] != None + - output.servers[1]['id'] != None + - output.servers[1]['name'] != None + - output.servers[1]['location'] != None + - output.servers[1]['sku']['name'] != None + - output.servers[1]['sku']['tier'] != None + - output.servers[1]['sku']['capacity'] != None + - output.servers[1]['version'] != None + - output.servers[1]['user_visible_state'] != None + - output.servers[1]['fully_qualified_domain_name'] != None + +# +# azure_rm_postgresqldatabase tests below +# +- name: Create instance of PostgreSQL Database -- check mode + 
azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + charset: UTF8 + collation: English_United States.1252 + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of PostgreSQL Database + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + charset: UTF8 + collation: English_United States.1252 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create again instance of PostgreSQL Database + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + charset: UTF8 + collation: English_United States.1252 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.name == 'testdatabase' + +- name: Try to update PostgreSQL Database without force_update + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + charset: utf8 + collation: en_US.utf8 + ignore_errors: yes + register: output +- name: Assert that nothing has changed + assert: + that: + - output.changed == False + +- name: Try to update PostgreSQL Database with force_update + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + charset: utf8 + collation: en_US.utf8 + force_update: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + - output.name == 'testdatabase' + +- name: Create second instance of PostgreSQL Database + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + 
name: testdatabase2 + +- name: Gather facts PostgreSQL Database + azure_rm_postgresqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + +- name: Gather facts PostgreSQL Database + azure_rm_postgresqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0]['server_name'] != None + - output.databases[0]['name'] != None + - output.databases[0]['charset'] != None + - output.databases[0]['collation'] != None + - output.databases[1]['server_name'] != None + - output.databases[1]['name'] != None + - output.databases[1]['charset'] != None + - output.databases[1]['collation'] != None + +- name: Delete instance of PostgreSQL Database -- check mode + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of PostgreSQL Database + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of PostgreSQL Database + azure_rm_postgresqldatabase: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: testdatabase + state: absent + register: output +- name: Assert the state has changed + assert: + that: + 
- output.changed == false + +# +# azure_rm_postgresqlfirewallrule +# + +- name: Create instance of Firewall Rule -- check mode + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create again instance of Firewall Rule + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +- name: Create Firewall Rule - second + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + +- name: Gather facts PostgreSQL Firewall Rule + azure_rm_postgresqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - "output.rules | length == 1" + +- 
name: Gather facts PostgreSQL Firewall Rule + azure_rm_postgresqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - output.rules[1].id != None + - output.rules[1].name != None + - output.rules[1].start_ip_address != None + - output.rules[1].end_ip_address != None + - "output.rules | length == 2" + +- name: Delete instance of Firewall Rule -- check mode + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of Firewall Rule + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of Firewall Rule + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete instance of Firewall Rule - second + azure_rm_postgresqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: firewallrule{{ rpfx }}second + state: absent + +- name: Gather facts PostgreSQL Firewall Rule + azure_rm_postgresqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: 
firewallrule{{ rpfx }} + register: output +- name: Assert that empty list was returned + assert: + that: + - output.changed == False + - "output.rules | length == 0" + +# +# azure_rm_postgresql_configuration +# +- name: Create instance of Configuration -- check mode + azure_rm_postgresqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: deadlock_timeout + value: 2000 + check_mode: yes + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to change default configuration + azure_rm_postgresqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: deadlock_timeout + value: 2000 + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to change default configuration -- idempotent + azure_rm_postgresqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: deadlock_timeout + value: 2000 + register: output +- name: Assert that change was not registered + assert: + that: + - not output.changed + +- name: Try to reset configuration + azure_rm_postgresqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: deadlock_timeout + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - output.changed + +- name: Try to reset configuration -- idempotent + azure_rm_postgresqlconfiguration: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: deadlock_timeout + state: absent + register: output +- name: Assert that change was registered + assert: + that: + - not output.changed + +- name: Gather facts PostgreSQL Configuration + azure_rm_postgresqlconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + name: deadlock_timeout + register: output +- name: Assert 
that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length == 1 + +- name: Gather facts PostgreSQL Configuration + azure_rm_postgresqlconfiguration_info: + resource_group: "{{ resource_group }}" + server_name: postgresqlsrv{{ rpfx }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.settings[0].id != None + - output.settings[0].name != None + - output.settings[0].value != None + - output.settings[0].description != None + - output.settings[0].source != None + - output.settings | length > 1 + +# +# azure_rm_postgresqlserver continuation / clean up +# + +- name: Delete instance of PostgreSQL Server -- check mode + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete second instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }}second + state: absent + async: 400 + poll: 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/aliases new file mode 100644 index 000000000..8f7a9a2e5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group1 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/tasks/main.yml new file mode 100644 index 000000000..645851a04 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednsrecordset/tasks/main.yml @@ -0,0 +1,259 @@ +- name: Create random domain name + set_fact: + domain_name: "{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create a Private DNS zone + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: present + register: results + +- name: Assert that Private DNS zone was created + assert: + that: results.changed + +- name: create "A" record set with multiple records + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that A record set was created + 
assert: + that: results.changed + +- name: re-run "A" record with same values + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that A record set was not changed + assert: + that: not results.changed + +- name: Update "A" record set with additional record + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + record_mode: append + records: + - entry: 192.168.100.104 + register: results + +- name: Assert that new record was appended + assert: + that: + - results.changed + +- name: re-update "A" record set with additional record + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + record_mode: append + records: + - entry: 192.168.100.104 + register: results + +- name: Assert that A record set was not changed + assert: + that: + - not results.changed + +- name: Remove 1 record from record set + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.101 + - entry: 192.168.100.102 + - entry: 192.168.100.103 + register: results + +- name: Assert that record was deleted + assert: + that: + - results.changed + +- name: Check_mode test + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + records: + - entry: 192.168.100.105 + check_mode: yes + register: results + +- name: Assert that check_mode returns new state + assert: + that: + - results.changed + +- name: Get information for A DNS recordset from Private DNS zone + 
azure_rm_privatednsrecordset_info: + resource_group: "{{ resource_group }}" + zone_name: "{{ domain_name }}.com" + relative_name: www + record_type: A + register: results + +- assert: + that: + - not results.changed + - results.dnsrecordsets[0].id != None + - results.dnsrecordsets[0].fqdn != None + - results.dnsrecordsets[0].record_type == 'A' + - results.dnsrecordsets[0].time_to_live != None + - results.dnsrecordsets[0].relative_name == 'www' + - results.dnsrecordsets[0].records | length > 0 + +- name: delete a record set + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + state: absent + register: results + +- name: Assert that record set deleted + assert: + that: results.changed + +- name: (idempotence test) re-run record set absent + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: www + zone_name: "{{ domain_name }}.com" + record_type: A + state: absent + register: results + +- name: + assert: + that: not results.changed + +- name: create SRV records in a new record set + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "_sip._tcp.{{ domain_name }}.com" + zone_name: "{{ domain_name }}.com" + time_to_live: 7200 + record_type: SRV + state: present + records: + - entry: sip.{{ domain_name }}.com + priority: 20 + weight: 10 + port: 5060 + register: results + +- name: Assert that SRV record set was created + assert: + that: + - results.changed + +- name: Get information for SRV DNS recordset from Private DNS zone + azure_rm_privatednsrecordset_info: + resource_group: "{{ resource_group }}" + zone_name: "{{ domain_name }}.com" + relative_name: "_sip._tcp.{{ domain_name }}.com" + record_type: SRV + register: results + +- assert: + that: + - not results.changed + - results.dnsrecordsets[0].id != None + - results.dnsrecordsets[0].fqdn != None + - results.dnsrecordsets[0].record_type == 'SRV' + - 
results.dnsrecordsets[0].time_to_live == 7200 + - results.dnsrecordsets[0].relative_name == "_sip._tcp.{{ domain_name }}.com" + - results.dnsrecordsets[0].records | length > 0 + +- name: create TXT records in a new record set + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "_txt.{{ domain_name }}.com" + zone_name: "{{ domain_name }}.com" + record_type: TXT + state: present + records: + - entry: "v=spf1 a -all" + - entry: "foo" + - entry: + - "bar" + - "baz" + register: results + +- name: Assert that TXT record set was created + assert: + that: + - results.changed + +- name: Get information for TXT DNS recordset from Private DNS zone + azure_rm_privatednsrecordset_info: + resource_group: "{{ resource_group }}" + zone_name: "{{ domain_name }}.com" + relative_name: "_txt.{{ domain_name }}.com" + record_type: TXT + register: results + +- assert: + that: + - not results.changed + - results.dnsrecordsets[0].id != None + - results.dnsrecordsets[0].fqdn != None + - results.dnsrecordsets[0].record_type == 'TXT' + - results.dnsrecordsets[0].time_to_live == 3600 + - results.dnsrecordsets[0].relative_name == "_txt.{{ domain_name }}.com" + - results.dnsrecordsets[0].records | length > 0 + +- name: Update SOA record + azure_rm_privatednsrecordset: + resource_group: "{{ resource_group }}" + relative_name: "@" + zone_name: "{{ domain_name }}.com" + record_type: SOA + state: present + records: + - host: azureprivatedns.net + email: azureprivatedns-host99.example.com + serial_number: 1 + refresh_time: 3699 + retry_time: 399 + expire_time: 2419299 + minimum_ttl: 399 + register: results + +- name: Assert that SOA record set was created + assert: + that: + - results.changed + +- name: Delete DNS zone + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/aliases new file mode 100644 index 000000000..90d5921a5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group2 +destructive +azure_rm_privatednszone_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/tasks/main.yml new file mode 100644 index 000000000..eba80899d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszone/tasks/main.yml @@ -0,0 +1,77 @@ +- name: Create random domain name + set_fact: + domain_name: "{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create a private DNS zone (check mode) + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + register: results + check_mode: true + +- assert: + that: results.changed + +- name: Create a private DNS zone + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + register: results + +- assert: + that: results.changed + +- name: Update private DNS zone with tags + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + tags: + test: modified + register: results + +- assert: + that: + - 
results.changed + - results.state.tags.test == 'modified' + +- name: Test idempotent + azure_rm_privatednszone: + name: "{{ domain_name }}.com" + resource_group: "{{ resource_group }}" + register: results + +- assert: + that: + - not results.changed + +- name: Retrieve DNS Zone Facts + azure_rm_privatednszone_info: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + register: zones + +- name: Assert that facts module returned result + assert: + that: + - zones.privatednszones[0].tags.test == 'modified' + - zones.privatednszones[0].number_of_record_sets == 1 + +# +# azure_rm_privatednszone cleanup +# + +- name: Delete private DNS zone + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent + +- name: Delete private DNS zone (idempotent) + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent + register: results + +- assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/tasks/main.yml new file mode 100644 index 000000000..6125abaa3 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatednszonelink/tasks/main.yml @@ -0,0 +1,126 @@ +- name: Create virtual network link name + set_fact: + link_name: "link{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create random domain name + set_fact: + domain_name: "{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}" + +- name: Create a Private DNS zone + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: present + +- name: Create a virtual network + azure_rm_virtualnetwork: + name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + dns_servers: + - 127.0.0.1 + - 127.0.0.2 + +- name: Create a subnet + azure_rm_subnet: + name: "subnet{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + address_prefix_cidr: 10.1.0.0/24 + +- name: Create a virtual network link + azure_rm_privatednszonelink: + resource_group: "{{ resource_group }}" + name: "{{ link_name }}" + zone_name: "{{ domain_name }}.com" + virtual_network: "vnet{{ rpfx }}" + state: present + register: results + +- name: Assert that virtual network link is created + assert: + that: results.changed + +- name: Create a virtual network link (Idempotent test) + azure_rm_privatednszonelink: + resource_group: "{{ resource_group }}" + name: "{{ link_name }}" + zone_name: "{{ domain_name }}.com" + virtual_network: "vnet{{ rpfx }}" + 
state: present + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Update a virtual network link + azure_rm_privatednszonelink: + resource_group: "{{ resource_group }}" + name: "{{ link_name }}" + zone_name: "{{ domain_name }}.com" + registration_enabled: true + virtual_network: "vnet{{ rpfx }}" + state: present + register: results + +- name: Assert that virtual network link is updated + assert: + that: results.changed + +- name: Get virtual network link + azure_rm_privatednszonelink_info: + resource_group: "{{ resource_group }}" + name: "{{ link_name }}" + zone_name: "{{ domain_name }}.com" + register: results + +- assert: + that: + - not results.changed + - results.virtualnetworklinks[0].name == "{{ link_name }}" + - results.virtualnetworklinks[0].registration_enabled == true + - results.virtualnetworklinks[0].provisioning_state == "Succeeded" + +- name: Delete virtual network link + azure_rm_privatednszonelink: + resource_group: "{{ resource_group }}" + name: "{{ link_name }}" + zone_name: "{{ domain_name }}.com" + state: absent + register: results + +- name: Assert that virtual network link is deleted + assert: + that: results.changed + +- name: Delete virtual network link (Idempotent test) + azure_rm_privatednszonelink: + resource_group: "{{ resource_group }}" + name: "{{ link_name }}" + zone_name: "{{ domain_name }}.com" + state: absent + register: results + +- name: Asset that output is not changed + assert: + that: not results.changed + +- name: Delete virtual network + azure_rm_virtualnetwork: + name: "vnet{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete private DNS zone + azure_rm_privatednszone: + resource_group: "{{ resource_group }}" + name: "{{ domain_name }}.com" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/aliases new file mode 100644 index 000000000..a31676ea1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group6 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/tasks/main.yml new file mode 100644 index 000000000..306c1cc0a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpoint/tasks/main.yml @@ -0,0 +1,157 @@ +- name: Set Private Endpoint Name + set_fact: + rpfx: "private{{ resource_group | hash('md5') | truncate(18, True, '') }}" + +- name: Create virtual network + azure_rm_virtualnetwork: + name: vnet{{ rpfx }} + address_prefixes_cidr: + - 10.1.0.0/16 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + resource_group: "{{ resource_group }}" + +- name: Create network security group + azure_rm_securitygroup: + name: secgroup{{ rpfx }} + resource_group: "{{ resource_group }}" + +- name: Create the subnet + azure_rm_subnet: + name: subnet{{ rpfx }} + virtual_network_name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + security_group: secgroup{{ rpfx }} + private_endpoint_network_policies: Disabled + private_link_service_network_policies: Disabled + 
service_endpoints: + - service: Microsoft.Sql + locations: + - eastus + - westus + register: subnet_output + +- name: Create instance of PostgreSQL Server + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: postgresqlsrv{{ rpfx }} + sku: + name: GP_Gen5_2 + tier: GeneralPurpose + location: eastus + storage_mb: 51200 + enforce_ssl: True + admin_username: zimxyz + admin_password: Password123! + register: post_output + +- name: Delete private endpoint, makesure there is no private endpoint exist + azure_rm_privateendpoint: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + state: absent + +- name: Create private endpoint + azure_rm_privateendpoint: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + private_link_service_connections: + - name: privateEndpoints_test_name + private_link_service_id: "{{ post_output.id }}" + group_ids: + - postgresqlServer + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key1: value1 + cert_validation_mode: ignore + register: output + +- name: Assert status succeeded and results match expectations + assert: + that: + - output.changed + - output.state.id is defined + - output.state.provisioning_state == "Succeeded" + - output.state.tags | length == 1 + +- name: Create private endpoint ( Idempotent Test) + azure_rm_privateendpoint: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + private_link_service_connections: + - name: privateEndpoints_test_name + private_link_service_id: "{{ post_output.id }}" + group_ids: + - postgresqlServer + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key1: value1 + register: output + +- name: Assert status succeeded and results match expectations + assert: + that: + - not output.changed + ignore_errors: yes + +- name: Update private endpoint with tags + azure_rm_privateendpoint: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + private_link_service_connections: + - 
name: privateEndpoints_test_name + private_link_service_id: "{{ post_output.id }}" + group_ids: + - postgresqlServer + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key1: value1 + key2: value2 + register: output + +- name: Assert status succeeded and results match expectations + assert: + that: + - output.changed + - output.state.tags | length == 2 + +- name: Get private endpoint info + azure_rm_privateendpoint_info: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output + +- name: Assert private endpoint info + assert: + that: + - output.privateendpoints[0].provisioning_state == "Succeeded" + - output.privateendpoints[0].tags | length == 2 + +- name: Delete private endpoint + azure_rm_privateendpoint: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + state: absent + register: output + +- name: Assert status succeeded and results match expectations + assert: + that: + - output.changed + +- name: Delete private endpoint ( Idempotent Test) + azure_rm_privateendpoint: + name: privateendpoint{{ rpfx }} + resource_group: "{{ resource_group }}" + state: absent + register: output + +- name: Assert status succeeded and results match expectations + assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/aliases new file mode 100644 index 000000000..a31676ea1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group6 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/meta/main.yml 
new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/tasks/main.yml new file mode 100644 index 000000000..e58a3ba22 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privateendpointdnszonegroup/tasks/main.yml @@ -0,0 +1,255 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: true + +- name: "Create virtual network" + azure_rm_virtualnetwork: + name: "vnet-{{ rpfx }}" + address_prefixes_cidr: + - "10.1.0.0/16" + resource_group: "{{ resource_group }}" + +- name: "Create the subnet" + azure_rm_subnet: + name: "subnet-{{ rpfx }}" + virtual_network_name: "vnet-{{ rpfx }}" + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + private_endpoint_network_policies: Disabled + private_link_service_network_policies: Disabled + register: subnet_output + +- name: "Create instance of PostgreSQL Server" + azure_rm_postgresqlserver: + resource_group: "{{ resource_group }}" + name: "postgresqlsrv-{{ rpfx }}" + sku: + name: "GP_Gen5_2" + tier: "GeneralPurpose" + location: "eastus" + storage_mb: 51200 + enforce_ssl: true + admin_username: "zimxyz" + admin_password: "Password123!" 
+ register: post_output + +- name: Create zone group for non-existant private endpoint + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "does-not-exist-{{ rpfx }}" + resource_group: "{{ resource_group }}" + ignore_errors: true + register: output +- name: Assert results match expectations + assert: + that: + - output.msg is match("Could not load the private endpoint", ignorecase=True) + +- name: Create private endpoint + azure_rm_privateendpoint: + name: privateendpoint-{{ rpfx }} + resource_group: "{{ resource_group }}" + private_link_service_connections: + - name: "postgres-link" + private_link_service_id: "{{ post_output.id }}" + group_ids: + - "postgresqlServer" + subnet: + id: "{{ subnet_output.state.id }}" + +- name: Create private DNS zone for postgres + azure_rm_privatednszone: + name: "privatelink.postgres.database.azure.com" + resource_group: "{{ resource_group }}" + +- name: Create virtual network link + azure_rm_privatednszonelink: + name: "private-link-{{ rpfx }}" + resource_group: "{{ resource_group }}" + zone_name: "privatelink.postgres.database.azure.com" + virtual_network: "vnet-{{ rpfx }}" + +- name: Get zone groups for private endpoint + azure_rm_privateendpointdnszonegroup_info: + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert results match expectations + assert: + that: + - not output.changed + - output.groups | length == 0 + +- name: Create zone group for private endpoint - check mode + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + private_dns_zone_configs: + - name: "default" + private_dns_zone: "privatelink.postgres.database.azure.com" + register: output + check_mode: true +- name: Assert results match expectations + assert: + that: + - output.changed + +- name: Create zone group for private endpoint + 
azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + private_dns_zone_configs: + - name: "default" + private_dns_zone: "privatelink.postgres.database.azure.com" + register: output +- name: Assert results match expectations + assert: + that: + - output.changed + - output.state.id + - output.state.name == 'zone-group-{{ rpfx }}' + - output.state.provisioning_state == 'Succeeded' + - output.state.private_dns_zone_configs | length == 1 + - output.state.private_dns_zone_configs[0].name == 'default' + - output.state.private_dns_zone_configs[0].private_dns_zone_id + - output.state.private_dns_zone_configs[0].record_sets | length == 1 + - output.state.private_dns_zone_configs[0].record_sets[0].fqdn == 'postgresqlsrv-{{ rpfx }}.privatelink.postgres.database.azure.com' + - output.state.private_dns_zone_configs[0].record_sets[0].ip_addresses | length == 1 + - output.state.private_dns_zone_configs[0].record_sets[0].ip_addresses[0] is match('^10.1.*') + - output.state.private_dns_zone_configs[0].record_sets[0].provisioning_state == 'Succeeded' + - output.state.private_dns_zone_configs[0].record_sets[0].record_set_name == 'postgresqlsrv-{{ rpfx }}' + - output.state.private_dns_zone_configs[0].record_sets[0].record_type == 'A' + - output.state.private_dns_zone_configs[0].record_sets[0].ttl + +- name: Create zone group for private endpoint - idempotent + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + private_dns_zone_configs: + - name: "default" + private_dns_zone: "privatelink.postgres.database.azure.com" + register: output +- name: Assert results match expectations + assert: + that: + - not output.changed + +- name: Get specific zone group for private endpoint + azure_rm_privateendpointdnszonegroup_info: + name: "zone-group-{{ rpfx }}" + private_endpoint: 
"privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert results match expectations + assert: + that: + - not output.changed + - output.groups | length == 1 + - output.groups[0].id + - output.groups[0].name == 'zone-group-{{ rpfx }}' + - output.groups[0].provisioning_state == 'Succeeded' + - output.groups[0].private_dns_zone_configs | length == 1 + - output.groups[0].private_dns_zone_configs[0].name == 'default' + - output.groups[0].private_dns_zone_configs[0].private_dns_zone_id + - output.groups[0].private_dns_zone_configs[0].record_sets | length == 1 + - output.groups[0].private_dns_zone_configs[0].record_sets[0].fqdn == 'postgresqlsrv-{{ rpfx }}.privatelink.postgres.database.azure.com' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].ip_addresses | length == 1 + - output.groups[0].private_dns_zone_configs[0].record_sets[0].ip_addresses[0] is match('^10.1.*') + - output.groups[0].private_dns_zone_configs[0].record_sets[0].provisioning_state == 'Succeeded' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].record_set_name == 'postgresqlsrv-{{ rpfx }}' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].record_type == 'A' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].ttl + +- name: Get all zone groups for private endpoint + azure_rm_privateendpointdnszonegroup_info: + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output +- name: Assert results match expectations + assert: + that: + - not output.changed + - output.groups | length == 1 + - output.groups[0].id + - output.groups[0].name == 'zone-group-{{ rpfx }}' + - output.groups[0].provisioning_state == 'Succeeded' + - output.groups[0].private_dns_zone_configs | length == 1 + - output.groups[0].private_dns_zone_configs[0].name == 'default' + - output.groups[0].private_dns_zone_configs[0].private_dns_zone_id + - 
output.groups[0].private_dns_zone_configs[0].record_sets | length == 1 + - output.groups[0].private_dns_zone_configs[0].record_sets[0].fqdn == 'postgresqlsrv-{{ rpfx }}.privatelink.postgres.database.azure.com' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].ip_addresses | length == 1 + - output.groups[0].private_dns_zone_configs[0].record_sets[0].ip_addresses[0] is match('^10.1.*') + - output.groups[0].private_dns_zone_configs[0].record_sets[0].provisioning_state == 'Succeeded' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].record_set_name == 'postgresqlsrv-{{ rpfx }}' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].record_type == 'A' + - output.groups[0].private_dns_zone_configs[0].record_sets[0].ttl + +- name: Update zone group for private endpoint + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + private_dns_zone_configs: + - name: "default-updated" + private_dns_zone: "privatelink.postgres.database.azure.com" + register: output +- name: Assert results match expectations + assert: + that: + - output.changed + - output.state.id + - output.state.name == 'zone-group-{{ rpfx }}' + - output.state.provisioning_state == 'Succeeded' + - output.state.private_dns_zone_configs | length == 1 + - output.state.private_dns_zone_configs[0].name == 'default-updated' + - output.state.private_dns_zone_configs[0].private_dns_zone_id + - output.state.private_dns_zone_configs[0].record_sets | length == 1 + - output.state.private_dns_zone_configs[0].record_sets[0].fqdn == 'postgresqlsrv-{{ rpfx }}.privatelink.postgres.database.azure.com' + - output.state.private_dns_zone_configs[0].record_sets[0].ip_addresses | length == 1 + - output.state.private_dns_zone_configs[0].record_sets[0].ip_addresses[0] is match('^10.1.*') + - output.state.private_dns_zone_configs[0].record_sets[0].provisioning_state == 'Succeeded' + - 
output.state.private_dns_zone_configs[0].record_sets[0].record_set_name == 'postgresqlsrv-{{ rpfx }}' + - output.state.private_dns_zone_configs[0].record_sets[0].record_type == 'A' + - output.state.private_dns_zone_configs[0].record_sets[0].ttl + +- name: Delete zone group for private endpoint - check mode + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" + register: output + check_mode: true +- name: Assert results match expectations + assert: + that: + - output.changed + +- name: Delete zone group for private endpoint + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: Assert results match expectations + assert: + that: + - output.changed + +- name: Delete non-existant zone group for private endpoint + azure_rm_privateendpointdnszonegroup: + name: "zone-group-{{ rpfx }}" + private_endpoint: "privateendpoint-{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: Assert results match expectations + assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/meta/main.yml new file mode 100644 index 
000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml new file mode 100644 index 000000000..bc0363c56 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml @@ -0,0 +1,284 @@ +- name: Set Private Link Service Names + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(20, True, '') }}" + azure_subscription_id: f64d4ee8-be94-457d-ba26-3fa6b6506cef + +- name: Delete the private link service + azure_rm_privatelinkservice: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "vnet{{ rpfx }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + +- name: Create a subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "sub{{ rpfx }}" + virtual_network_name: "vnet{{ rpfx }}" + address_prefix_cidr: "10.1.0.0/24" + private_link_service_network_policies: Disabled + private_endpoint_network_policies: Disabled + register: subnet_output + +- name: create public ip + azure_rm_publicipaddress: + resource_group: '{{ resource_group }}' + name: "pip{{ rpfx }}" + sku: Standard + allocation_method: Static + +- name: create load balancer with frontend_ip_configurations + azure_rm_loadbalancer: + resource_group: '{{ resource_group }}' + name: "lb{{ rpfx }}" + sku: Standard + frontend_ip_configurations: + - name: frontendipconf0 + public_ip_address: "pip{{ rpfx }}" + backend_address_pools: + - name: backendaddrpool0 + probes: + - name: prob0 + port: 80 + load_balancing_rules: + 
- name: lbrbalancingrule0 + frontend_ip_configuration: frontendipconf0 + backend_address_pool: backendaddrpool0 + frontend_port: 80 + backend_port: 80 + probe: prob0 + inbound_nat_rules: + - name: inboundnatrule0 + backend_port: 8080 + protocol: Tcp + frontend_port: 8080 + frontend_ip_configuration: frontendipconf0 + register: lb_output + +- name: Create private link service (Check mode test) + azure_rm_privatelinkservice: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + enable_proxy_protocol: True + fqdns: + - 'dns01.com' + - 'dns02.com' + visibility: + subscriptions: + - "{{ azure_subscription_id }}" + auto_approval: + subscriptions: + - "{{ azure_subscription_id }}" + load_balancer_frontend_ip_configurations: + - id: "{{ lb_output.state.frontend_ip_configurations[0].id }}" + ip_configurations: + - name: ip_configuration01 + properties: + primary: True + private_ip_allocation_method: 'Dynamic' + private_ip_address_version: 'IPv4' + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key1: value1 + check_mode: True + register: output + +- assert: + that: output.changed + +- name: Create private link service + azure_rm_privatelinkservice: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + enable_proxy_protocol: True + fqdns: + - 'dns01.com' + - 'dns02.com' + visibility: + subscriptions: + - "{{ azure_subscription_id }}" + auto_approval: + subscriptions: + - "{{ azure_subscription_id }}" + load_balancer_frontend_ip_configurations: + - id: "{{ lb_output.state.frontend_ip_configurations[0].id }}" + ip_configurations: + - name: ip_configuration01 + properties: + primary: True + private_ip_allocation_method: 'Dynamic' + private_ip_address_version: 'IPv4' + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key1: value1 + register: output + +- assert: + that: output.changed + +- name: Create private link service (Idempotent test) + azure_rm_privatelinkservice: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + 
enable_proxy_protocol: True + fqdns: + - 'dns01.com' + - 'dns02.com' + visibility: + subscriptions: + - "{{ azure_subscription_id }}" + auto_approval: + subscriptions: + - "{{ azure_subscription_id }}" + load_balancer_frontend_ip_configurations: + - id: "{{ lb_output.state.frontend_ip_configurations[0].id }}" + ip_configurations: + - name: ip_configuration01 + properties: + primary: True + private_ip_allocation_method: 'Dynamic' + private_ip_address_version: 'IPv4' + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key1: value1 + register: output + +- assert: + that: not output.changed + +- name: Update private link service + azure_rm_privatelinkservice: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + enable_proxy_protocol: False + fqdns: + - 'dns03.com' + - 'dns04.com' + visibility: + subscriptions: + - "{{ azure_subscription_id }}" + auto_approval: + subscriptions: + - "{{ azure_subscription_id }}" + load_balancer_frontend_ip_configurations: + - id: "{{ lb_output.state.frontend_ip_configurations[0].id }}" + ip_configurations: + - name: ip_configuration01 + properties: + primary: True + private_ip_allocation_method: 'Dynamic' + private_ip_address_version: 'IPv4' + subnet: + id: "{{ subnet_output.state.id }}" + tags: + key2: value2 + key3: value3 + register: output + +- assert: + that: output.changed + +- name: Get private link service info by name + azure_rm_privatelinkservice_info: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.link_service[0].tags | length == 3 + - output.link_service[0].fqdns | length == 4 + - output.link_service[0].enable_proxy_protocol == false + +- name: Create private endpoint + azure_rm_privateendpoint: + name: "pd{{ rpfx }}" + resource_group: "{{ resource_group }}" + private_link_service_connections: + - name: private_connection_name + private_link_service_id: "{{ output.link_service[0].id }}" + subnet: + id: "{{ subnet_output.state.id }}" + 
register: output + +- name: Get private endpoint connection info + azure_rm_privateendpointconnection_info: + service_name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.endpoint_connection[0].private_link_service_connection_state.actions_required == "None" + - output.endpoint_connection[0].private_link_service_connection_state.description == "Approved" + - output.endpoint_connection[0].private_link_service_connection_state.status == "Approved" + +- name: Update private endpoint connection + azure_rm_privateendpointconnection: + name: "{{ output.endpoint_connection[0].name }}" + service_name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + connection_state: + status: "Rejected" + description: "Rejected" + actions_required: "actions required" + register: output + +- assert: + that: output.changed + +- name: Get private endpoint connection info by name + azure_rm_privateendpointconnection_info: + name: "{{ output.state.name }}" + service_name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.endpoint_connection[0].private_link_service_connection_state.actions_required == "actions required" + - output.endpoint_connection[0].private_link_service_connection_state.description == "Rejected" + - output.endpoint_connection[0].private_link_service_connection_state.status == "Rejected" + +- name: Delete private endpoint connection + azure_rm_privateendpointconnection: + name: "{{ output.endpoint_connection[0].name }}" + service_name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- assert: + that: output.changed + +- name: Delete private endpoint + azure_rm_privateendpoint: + name: "pd{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + +- assert: + that: output.changed + +- name: Pause for 5 mimutes to waiting + shell: sleep 300 + +- name: Delete private link service + 
azure_rm_privatelinkservice: + name: "lsp{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- assert: + that: output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/tasks/main.yml new file mode 100644 index 000000000..bf7dbfd0b --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_proximityplacementgroup/tasks/main.yml @@ -0,0 +1,76 @@ +- name: Create proximity placement group name + set_fact: + group_name: "ppg{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create a proximity placement group + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + location: eastus + name: "{{ group_name }}" + state: present + register: results + +- name: Assert that placement group is created + assert: + that: results.changed + +- name: Create a proximity 
placement group again (Idempotent test) + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + location: eastus + name: "{{ group_name }}" + state: present + register: results + +- name: Assert that output is not changed + assert: + that: not results.changed + +- name: Update a proximity placement group + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + location: eastus + name: "{{ group_name }}" + tags: + key1: "value1" + state: present + register: results + +- name: Assert that placement group is updated + assert: + that: results.changed + +- name: Get proximity placement group facts + azure_rm_proximityplacementgroup_info: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + register: results + +- assert: + that: + - not results.changed + - results.proximityplacementgroups[0].name == "{{ group_name }}" + - results.proximityplacementgroups[0].location == "eastus" + - results.proximityplacementgroups[0].proximity_placement_group_type == "Standard" + +- name: Delete proximity placement group + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + state: absent + register: results + +- name: Assert that placement group is deleted + assert: + that: results.changed + +- name: Delete proximity placement group again (Idempotent test) + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + name: "{{ group_name }}" + state: absent + register: results + +- name: Asset that output is not changed + assert: + that: not results.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/aliases new file mode 100644 index 000000000..c8f442a8f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/aliases @@ -0,0 +1,4 @@ +cloud/azure 
+shippable/azure/group2 +destructive +azure_rm_publicipaddress_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/tasks/main.yml new file mode 100644 index 000000000..119747b26 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_publicipaddress/tasks/main.yml @@ -0,0 +1,139 @@ +- name: Create domain name + set_fact: + domain_name: "ansible-{{ resource_group | hash('md5') | truncate(24, True, '') }}" + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Remove public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + state: absent + +- name: Create public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + allocation_method: Static + domain_name: "{{ domain_name }}" + tags: + testing: testing + delete: on-exit + register: output + +- assert: + that: + - output.state.public_ip_allocation_method == 'static' + - output.state.dns_settings.domain_name_label == domain_name + - output.state.tags | length == 2 + - output.state.tags.testing == 'testing' + +- name: Create public ip with IPV6 + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}-02" + allocation_method: Static + domain_name: "{{ domain_name }}-02" + version: 'ipv6' + sku: 'Standard' + zones: + - 1 + 
register: output + +- assert: + that: + - output.state.public_ip_address_version == 'ipv6' + - output.state.zones == ['1'] + +- name: Should be idempotent + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + allocation_method: static + domain_name: "{{ domain_name }}" + register: output + +- assert: + that: not output.changed + +- name: Update tags + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + allocation_method: static + domain_name: "{{ domain_name }}" + append_tags: yes + tags: + delete: never + foo: bar + register: output + +- assert: + that: + - output.state.tags | length == 3 + - output.state.tags.delete == 'never' + +- name: Gather facts, filtering by tag + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + tags: + - testing + - foo:bar + register: infos + +- assert: + that: infos.publicipaddresses | length == 1 + +- name: Purge all tags + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + allocation_method: static + domain_name: "{{ domain_name }}" + append_tags: no + register: output + +- assert: + that: + - output.state.tags | length == 0 + +- name: Gather facts for a public ip + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + register: pip + +- assert: + that: + - "pip.publicipaddresses | length == 1" + - pip.publicipaddresses[0].name == "pip{{ rpfx }}" + - pip.publicipaddresses[0].allocation_method == 'static' + - pip.publicipaddresses[0].dns_settings.domain_name_label == domain_name + +- name: Gather facts for all public ips + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + register: infos + +- assert: + that: infos.publicipaddresses | length > 0 + +- name: Remove IPV6 public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}-02" + state: absent + +- name: Remove public ip + 
azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + state: absent + +- name: Gather facts for a public ip + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + name: "pip{{ rpfx }}" + register: infos + +- assert: + that: infos.publicipaddresses | length == 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/aliases new file mode 100644 index 000000000..cc941b59c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group12 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/tasks/main.yml new file mode 100644 index 000000000..fc92da931 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_recoveryservicesvault/tasks/main.yml @@ -0,0 +1,55 @@ +- name: Fix resource prefix + set_fact: + name: "revault{{ resource_group | hash('md5') | truncate(22, True, '') }}" + location: "eastus" + +- name: Create Azure Recovery Service vault + azure_rm_recoveryservicesvault: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + location: "{{ location }}" + state: 
"present" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Create Azure Recovery Service vault (idempotent) + azure_rm_recoveryservicesvault: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + location: "{{ location }}" + state: "present" + register: output + +- name: Assert that output has no changed + assert: + that: + - not output.changed + +- name: Get Azure Recovery Service Vault Details + azure_rm_recoveryservicesvault_info: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + register: output + +- name: Assert that output has changed + assert: + that: + - output.response.id != None + - output.response.name != None + +- name: Delete Azure Recovery Service vault + azure_rm_recoveryservicesvault: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + location: "{{ location }}" + state: "absent" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/aliases new file mode 100644 index 000000000..733b37851 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/aliases @@ -0,0 +1,6 @@ +cloud/azure +shippable/azure/group2 +unsupported +destructive +azure_rm_rediscache_facts +azure_rm_rediscachefirewallrule diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/tasks/main.yml new file mode 100644 index 000000000..18e71e56e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_rediscache/tasks/main.yml @@ -0,0 +1,492 @@ +- name: Fix resource prefix + set_fact: + redis_name: "redis-{{ resource_group | hash('md5') | truncate(7, True, '') }}-{{ 1000 | random }}" + vnet_name: "vnet-{{ resource_group | hash('md5') | truncate(7, True, '') }}-{{ 1000 | random }}" + subnet_name: "subnet-{{ resource_group | hash('md5') | truncate(7, True, '') }}-{{ 1000 | random }}" + rule_name: "rule1" + run_once: yes + +- name: Create a redis cache (Check Mode) + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + sku: + name: basic + size: C1 + wait_for_provisioning: False + check_mode: yes + register: output + +- name: Assert creating redis cache check mode + assert: + that: + - output.changed + +- name: Create a redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + sku: + name: basic + size: C1 + wait_for_provisioning: False + register: output + +- name: Assert creating redis cache + assert: + that: + - output.changed + - output.id + +- name: Get facts + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + register: facts + +- name: Assert facts + assert: + that: + - facts.rediscaches | length == 1 + - facts.rediscaches[0].id != None + - facts.rediscaches[0].host_name != None + - facts.rediscaches[0].provisioning_state != None + - facts.rediscaches[0].sku.name == 'basic' + - facts.rediscaches[0].sku.size == 'C1' + +- name: Update the redis cache (idempotent) + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + sku: + name: basic + size: C1 + 
wait_for_provisioning: False + register: output + +- name: assert output not changed + assert: + that: + - not output.changed + + +- name: long-running rediscache tests [run with `--tags long_run,untagged` to enable] + # creating redis Cache costs about 20 mins async operation, + # need to poll status from Creating to Running, then able to perform updating/deleting operation, + # otherwise, will met error: + # "The resource '' is busy processing a previous update request or is undergoing system maintenance. + # As such, it is currently unable to accept the update request. Please try again later." + block: + - name: Wait for Redis provisioning to complete + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + register: facts + until: facts.rediscaches[0]['provisioning_state'] == 'Succeeded' + retries: 30 + delay: 60 + + - name: (actually) update redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + sku: + name: basic + size: C1 + enable_non_ssl_port: true + tags: + testing: foo + wait_for_provisioning: True + register: output + + - name: assert output changed + assert: + that: + - output.changed + + - name: Update redis cache configuration + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + sku: + name: basic + size: C1 + enable_non_ssl_port: True + maxmemory_policy: allkeys_lru + tags: + testing: foo + register: output + + - name: assert output changed + assert: + that: + - output.changed + + - name: Scale up the redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + sku: + name: standard + size: C1 + tags: + testing: foo + wait_for_provisioning: True + register: output + + - assert: + that: + - output.changed + + - name: Force reboot redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + reboot: + reboot_type: all + register: output 
+ + - name: assert redis rebooted + assert: + that: + - output.changed + + - name: Delete the redis cache (Check Mode) + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + state: absent + check_mode: yes + register: output + + - name: assert deleting redis cache check mode + assert: + that: output.changed + + - name: Delete the redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}" + state: absent + register: output + + - assert: + that: + - output.changed + tags: [long_run, never] + + +- name: Create a redis cache with enhanced configuration options + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + sku: + name: basic + size: C1 + redis_version: "6" + minimum_tls_version: "1.2" + public_network_access: "Disabled" + wait_for_provisioning: false + register: output +- name: Assert creating redis cache + assert: + that: + - output.changed + +- name: Get facts for enhanced cache + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + register: facts +- name: Assert enhanced cache facts + assert: + that: + - facts.rediscaches | length == 1 + - facts.rediscaches[0].id != None + - facts.rediscaches[0].host_name != None + - facts.rediscaches[0].provisioning_state != None + - facts.rediscaches[0].sku.name == 'basic' + - facts.rediscaches[0].sku.size == 'C1' + - facts.rediscaches[0].redis_version is version('6', '>=') and facts.rediscaches[0].redis_version is version('7', '<') + - facts.rediscaches[0].minimum_tls_version == '1.2' + - facts.rediscaches[0].public_network_access == 'Disabled' + + +- name: long-running enhanced rediscache tests [run with `--tags long_run,untagged` to enable] + # creating redis Cache costs about 20 mins async operation, + # need to poll status from Creating to Running, then able to perform updating/deleting operation, + # otherwise, will met error: + # "The 
resource '' is busy processing a previous update request or is undergoing system maintenance. + # As such, it is currently unable to accept the update request. Please try again later." + block: + - name: Wait for Redis provisioning to complete + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + register: facts + until: facts.rediscaches[0]['provisioning_state'] == 'Succeeded' + retries: 30 + delay: 60 + + - name: update redis (idempotent) + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + sku: + name: basic + size: C1 + redis_version: "6" + minimum_tls_version: "1.2" + public_network_access: "Disabled" + wait_for_provisioning: true + register: output + - name: assert output not changed + assert: + that: + - not output.changed + + - name: update redis cache TLS + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + sku: + name: basic + size: C1 + redis_version: "6" + minimum_tls_version: "1.1" + public_network_access: "Disabled" + wait_for_provisioning: true + register: output + - name: assert output changed + assert: + that: + - output.changed + + - name: update redis cache public network access + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + sku: + name: basic + size: C1 + redis_version: "6" + minimum_tls_version: "1.1" + public_network_access: "Enabled" + wait_for_provisioning: true + register: output + - name: assert output changed + assert: + that: + - output.changed + + - name: Get facts for enhanced cache + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + register: facts + - name: Assert enhanced cache facts + assert: + that: + - facts.rediscaches[0].minimum_tls_version == '1.1' + - facts.rediscaches[0].public_network_access == 'Enabled' + + - name: update redis cache version + azure_rm_rediscache: + 
resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + sku: + name: basic + size: C1 + redis_version: "6" + minimum_tls_version: "1.1" + public_network_access: "Enabled" + wait_for_provisioning: true + register: output + failed_when: "output.changed or 'not supported' not in output.msg" + + - name: Delete the redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}-enhanced" + state: absent + register: output + - assert: + that: + - output.changed + tags: [long_run, never] + + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vnet_name }}" + address_prefixes: "10.10.0.0/16" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "{{ subnet_name }}" + address_prefix: "10.10.0.0/24" + virtual_network: "{{ vnet_name }}" + +- name: Create redis with subnet + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + sku: + name: premium + size: P1 + subnet: + name: "{{ subnet_name }}" + virtual_network_name: "{{ vnet_name }}" + wait_for_provisioning: False + register: output + +- name: Assert creating redis cache + assert: + that: + - output.changed + - output.id + +- name: Get facts + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + return_access_keys: True + register: facts + +- name: Assert facts + assert: + that: + - facts.rediscaches | length == 1 + - facts.rediscaches[0].subnet != None + - facts.rediscaches[0].access_keys.primary != None + +- name: Create firewall rule (Check mode) + azure_rm_rediscachefirewallrule: + resource_group: "{{ resource_group }}" + cache_name: "{{ redis_name }}2" + name: "{{ rule_name }}" + start_ip_address: 192.168.1.1 + end_ip_address: 192.168.1.4 + check_mode: yes + register: output + +- name: Assert check mode creation + assert: + that: + - output.changed + + +- name: long-running 
key/firewallrule tests [run with `--tags long_run,untagged` to enable] +# Creating firewall rule need Redis status is running, while creating redis Cache costs about 20 mins async operation, +# need to poll status from Creating to Running, then able to perform firewall rule creating, +# otherwise, will met error: +# "Error creating/updating Firewall rule of Azure Cache for Redis: Azure Error: Conflict\nMessage: The resource +# '/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis' is busy processing +# a previous update request or is undergoing system maintenance. As such, it is currently unable to accept the update request. Please try again later." + block: + - name: Wait for Redis provisioning to complete + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + register: facts + until: facts.rediscaches[0]['provisioning_state'] == 'Succeeded' + retries: 30 + delay: 60 + + - name: Create firewall rule + azure_rm_rediscachefirewallrule: + resource_group: "{{ resource_group }}" + cache_name: "{{ redis_name }}2" + name: "{{ rule_name }}" + start_ip_address: 192.168.1.1 + end_ip_address: 192.168.1.4 + register: output + + - name: Assert creation + assert: + that: + - output.changed + - output.id + + - name: Update firewall rule idempotence + azure_rm_rediscachefirewallrule: + resource_group: "{{ resource_group }}" + cache_name: "{{ redis_name }}2" + name: "{{ rule_name }}" + start_ip_address: 192.168.1.1 + end_ip_address: 192.168.1.4 + register: output + + - name: Assert idempotence + assert: + that: + - output.changed == False + + - name: Update firewall rule + azure_rm_rediscachefirewallrule: + resource_group: "{{ resource_group }}" + cache_name: "{{ redis_name }}2" + name: "{{ rule_name }}" + end_ip_address: 192.168.1.5 + register: output + + - name: Assert updating + assert: + that: + - output.changed + + - name: Get key facts + azure_rm_rediscache_info: 
+ resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + return_access_keys: True + register: key_facts + + - name: regenerate primary redis key + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + regenerate_key: + key_type: "primary" + register: output + + - name: Assert output + assert: + that: + - output.changed + + - name: Get facts after key regeneration + azure_rm_rediscache_info: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + return_access_keys: true + register: key_facts2 + + - name: Assert key change + assert: + that: + - key_facts.rediscaches[0].access_keys.primary != key_facts2.rediscaches[0].access_keys.primary + - key_facts.rediscaches[0].access_keys.secondary == key_facts2.rediscaches[0].access_keys.secondary + + - name: Delete firewall rule + azure_rm_rediscachefirewallrule: + resource_group: "{{ resource_group }}" + cache_name: "{{ redis_name }}2" + name: "{{ rule_name }}" + state: absent + register: output + + - name: Assert deletion + assert: + that: + - output.changed + + - name: Delete the redis cache + azure_rm_rediscache: + resource_group: "{{ resource_group }}" + name: "{{ redis_name }}2" + state: absent + register: output + tags: [long_run, never] diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/meta/main.yml new file mode 100644 index 
000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/tasks/main.yml new file mode 100644 index 000000000..055524705 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationassignment/tasks/main.yml @@ -0,0 +1,78 @@ +- name: set facts + set_fact: + subscription_id: "{{ azure_subscription_id }}" + managed_by_tenant_id: "{{ azure_managed_by_tenant_id }}" + principal_id: "{{ azure_principal_id }}" + role_definition_id: "{{ azure_role_definition_id }}" + run_once: yes + +- name: Create a RegistrationDefinition + azure_rm_registrationdefinition: + properties: + description: first_test + authorizations: + - principal_id: "{{ principal_id }}" + role_definition_id: "{{ role_definition_id }}" + managed_by_tenant_id: "{{ managed_by_tenant_id }}" + registration_definition_name: test_def + register: output1 + +- name: Create a RegistrationAssignment ---check mode + azure_rm_registrationassignment: + scope: subscriptions/{{ subscription_id }} + properties: + registration_definition_id: "{{ output1.state.id }}" + register: output + check_mode: yes + +- assert: + that: + - output.changed + +- name: Create a RegistrationAssignment + azure_rm_registrationassignment: + scope: subscriptions/{{ subscription_id }} + properties: + registration_definition_id: "{{ output1.state.id }}" + register: output2 + +- assert: + that: + - output2.changed + +- name: Create a RegistrationAssignment -- idempotent + azure_rm_registrationassignment: + scope: subscriptions/{{ subscription_id }} + registration_assignment_id: "{{ output2.state.name }}" + properties: + registration_definition_id: 
"{{ output1.state.id }}" + register: output + +- assert: + that: + - not output.changed + +- name: Get a RegistrationAssignment + azure_rm_registrationassignment_info: + scope: subscriptions/{{ subscription_id }} + registration_assignment_id: "{{ output2.state.name }}" + register: output + +- assert: + that: + - output.registration_assignments[0].properties.registration_definition_id == "{{ output1.state.id }}" + +- name: Get all RegistrationAssignment + azure_rm_registrationassignment_info: + scope: subscriptions/{{ subscription_id }} + register: output + +- assert: + that: + - output.registration_assignments | length >= 1 + +- name: Delete the RegistrationAssignment + azure_rm_registrationassignment: + scope: subscriptions/{{ subscription_id }} + registration_assignment_id: "{{ output2.state.name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/tasks/main.yml new 
file mode 100644 index 000000000..0395db002 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_registrationdefinition/tasks/main.yml @@ -0,0 +1,134 @@ +- name: set facts + set_fact: + subscription_id: "{{ azure_subscription_id }}" + subscription_sec_id: "{{ azure_subscription_sec_id }}" + managed_by_tenant_id: "{{ azure_managed_by_tenant_id }}" + principal_id: "{{ azure_principal_id }}" + role_definition_id: "{{ azure_role_definition_id }}" + reg_def_name: test_name + run_once: yes + +- name: Create a RegistrationDefinition -- check mode + azure_rm_registrationdefinition: + properties: + description: first_test + authorizations: + - principal_id: "{{ principal_id }}" + role_definition_id: "{{ role_definition_id }}" + managed_by_tenant_id: "{{ managed_by_tenant_id }}" + registration_definition_name: "{{ reg_def_name }}" + check_mode: yes + register: output + +- name: Assert creating registration definition check mode + assert: + that: + - output.changed + +- name: Create a RegistrationDefinition with scope + azure_rm_registrationdefinition: + scope: "{{ subscription_sec_id }}" + properties: + description: test definition with scope + authorizations: + - principal_id: "{{ principal_id }}" + role_definition_id: "{{ role_definition_id }}" + managed_by_tenant_id: "{{ managed_by_tenant_id }}" + registration_definition_name: "{{ reg_def_name }}" + register: output2 + +- name: Assert creating registration definition + assert: + that: + - output2.changed + +- name: Create a RegistrationDefinition + azure_rm_registrationdefinition: + properties: + description: first_test + authorizations: + - principal_id: "{{ principal_id }}" + role_definition_id: "{{ role_definition_id }}" + managed_by_tenant_id: "{{ managed_by_tenant_id }}" + registration_definition_name: "{{ reg_def_name }}" + register: output1 + +- name: Assert creating registration definition + assert: + that: + - output1.changed + +- name: Create a RegistrationDefinition 
(idempotent) + azure_rm_registrationdefinition: + registration_definition_id: "{{ output1.state.name }}" + properties: + description: first_test + authorizations: + - principal_id: "{{ principal_id }}" + role_definition_id: "{{ role_definition_id }}" + managed_by_tenant_id: "{{ managed_by_tenant_id }}" + registration_definition_name: "{{ reg_def_name }}" + register: output + +- name: Assert creating registration definition + assert: + that: + - not output.changed + +- name: Update the RegistrationDefinition properties description and name + azure_rm_registrationdefinition: + registration_definition_id: "{{ output1.state.name }}" + properties: + description: second_test + authorizations: + - principal_id: "{{ principal_id }}" + role_definition_id: "{{ role_definition_id }}" + managed_by_tenant_id: "{{ managed_by_tenant_id }}" + registration_definition_name: "{{ reg_def_name }}02" + register: output + +- name: Assert creating registration definition + assert: + that: + - output.changed + +- name: Get the Registration Definition info + azure_rm_registrationdefinition_info: + registration_definition_id: "{{ output1.state.name }}" + register: output + +- name: Assert the registration definition info + assert: + that: + - output.registration_definitions[0].name == "{{ output1.state.name }}" + - output.registration_definitions[0].properties.authorizations[0].principal_id == "{{ principal_id }}" + - output.registration_definitions[0].properties.authorizations[0].role_definition_id == "{{ role_definition_id }}" + - output.registration_definitions[0].properties.provisioning_state == "Succeeded" + - output.registration_definitions[0].properties.description == "second_test" + - output.registration_definitions[0].properties.registration_definition_name == "test_name02" + +- name: Get All Registration Definition info in the subscription + azure_rm_registrationdefinition_info: + scope: "{{ subscription_id }}" + register: output + +- name: Assert all the registration definition 
info + assert: + that: + - output.registration_definitions | length >=1 + +- name: Delete the registration definition + azure_rm_registrationdefinition: + registration_definition_id: "{{ output1.state.name }}" + state: absent + register: output + +- name: Assert delete registration definition success + assert: + that: + - output.changed + +- name: Delete the registration definition + azure_rm_registrationdefinition: + registration_definition_id: "{{ output2.state.name }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/aliases new file mode 100644 index 000000000..49acfee76 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group2 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/tasks/main.yml new file mode 100644 index 000000000..6ec87237d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resource/tasks/main.yml @@ -0,0 +1,158 @@ +- name: Prepare random number + set_fact: + nsgname: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + storageaccountname: "stacc{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + dbname: "mdb{{ 
resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Call REST API + azure_rm_resource: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + body: + location: eastus + idempotency: yes + register: output + +- name: Assert that something has changed + assert: + that: output.changed + +- name: Call REST API + azure_rm_resource: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + body: + location: eastus + idempotency: yes + register: output + +- name: Assert that nothing has changed + assert: + that: not output.changed + +- name: Call REST API + azure_rm_resource: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + body: + location: eastus + tags: + a: "abc" + b: "cde" + idempotency: yes + register: output + +- name: Assert that something has changed + assert: + that: output.changed + +- name: Try to get information about account + azure_rm_resource_info: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + resource_name: "{{ nsgname }}" + register: output + +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response[0]['name'] != None + - output.response | length == 1 + +- name: Try to query a list + azure_rm_resource_info: + api_version: '2018-02-01' + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + register: output +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response[0]['name'] != None + - output.response | length >= 1 + +- name: Try to query a list - same without API version + 
azure_rm_resource_info: + resource_group: "{{ resource_group }}" + provider: network + resource_type: networksecuritygroups + register: output +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response[0]['name'] != None + - output.response | length >= 1 + +- name: Query all the resources in the resource group + azure_rm_resource_info: + resource_group: "{{ resource_group }}" + resource_type: resources + register: output +- name: Assert value was returned + assert: + that: + - not output.changed + - output.response | length >= 1 + +- name: Create storage account that requires LRO polling + azure_rm_resource: + polling_timeout: 600 + polling_interval: 60 + api_version: '2018-07-01' + resource_group: "{{ resource_group }}" + provider: Storage + resource_type: storageAccounts + resource_name: "{{ storageaccountname }}" + body: + sku: + name: Standard_GRS + kind: Storage + location: eastus + register: output + +- name: Assert that storage was successfully created + assert: + that: "output['response']['name'] == '{{ storageaccountname }}'" + + +- name: Try to storage keys -- special case when subresource part has no name + azure_rm_resource: + resource_group: "{{ resource_group }}" + provider: storage + resource_type: storageAccounts + resource_name: "{{ storageaccountname }}" + subresource: + - type: listkeys + api_version: '2018-03-01-preview' + method: POST + register: keys + +- name: Assert that key was returned + assert: + that: keys['response']['keys'][0]['value'] | length > 0 + +- name: Delete storage - without API version + azure_rm_resource: + polling_timeout: 600 + polling_interval: 60 + method: DELETE + resource_group: "{{ resource_group }}" + provider: Storage + resource_type: storageAccounts + resource_name: "{{ storageaccountname }}" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/aliases new file mode 100644 index 000000000..0dbbfceba --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group3 +destructive +azure_rm_resourcegroup_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/tasks/main.yml new file mode 100644 index 000000000..706a855e8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_resourcegroup/tasks/main.yml @@ -0,0 +1,50 @@ +- name: Get resource group datalake info + azure_rm_resourcegroup_info: + name: "{{ resource_group_datalake }}" + list_resources: yes + register: rg + +- assert: + that: + - rg.resourcegroups | length == 1 + - rg.resourcegroups[0].resources | length >= 0 + +- name: Get resource group info + azure_rm_resourcegroup_info: + register: rg + +- assert: + that: + - rg.resourcegroups | length >= 1 + +- name: Get resource group info + azure_rm_resourcegroup_info: + name: "{{ resource_group }}" + list_resources: yes + register: rg + +- assert: + that: + - rg.resourcegroups | length == 1 + - rg.resourcegroups[0].resources | length >= 0 + +- name: Create resource group (idempontent) + azure_rm_resourcegroup: + name: "{{ resource_group }}" + location: "{{ rg.resourcegroups[0].location }}" + 
register: output + +- assert: + that: + - not output.changed + +- name: delete resource group + azure_rm_resourcegroup: + name: "{{ resource_group }}" + state: absent + check_mode: yes + register: output + +- assert: + that: + - output.changed \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/aliases new file mode 100644 index 000000000..8767e0189 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group10 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/tasks/main.yml new file mode 100644 index 000000000..c4e4d15d6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roleassignment/tasks/main.yml @@ -0,0 +1,220 @@ +- name: setup basic facts + set_fact: + uuid: bb21a88b-30e1-42b5-84e8-1d3f322de033 + # Disk Backup Reader, unlikely to be already assigned in ansible-test resource groups. 
+ az_role_definition_guid: '3e5e47e6-65f7-47ef-90b5-e5dd4d455f24' + +- name: List All + azure_rm_roleassignment_info: + register: az_role_assignments + +- name: Fetch specific assignments + azure_rm_roleassignment_info: + name: "{{ item.name | default(omit) }}" + assignee: "{{ item.assignee | default(omit) }}" + id: "{{ item.id | default(omit) }}" + role_definition_id: "{{ item.role_definition_id | default(omit) }}" + scope: "{{ item.scope | default(omit) }}" + strict_scope_match: True + register: az_role_assignment_specific + loop: + - name: "{{ az_role_assignments.roleassignments[0].name }}" + scope: "{{ az_role_assignments.roleassignments[0].scope }}" + - assignee: "{{ az_role_assignments.roleassignments[0].principal_id }}" + scope: "{{ az_role_assignments.roleassignments[0].scope }}" + role_definition_id: "{{ az_role_assignments.roleassignments[0].role_definition_id }}" + - id: "{{ az_role_assignments.roleassignments[0].id }}" + +- name: check specific fetch for single return + assert: + that: + - "{{ ( item.roleassignments | length) == 1 }}" + loop: "{{ az_role_assignment_specific.results }}" + +- name: Intentional mutual exclusion info + azure_rm_roleassignment_info: + name: "{{ item.name | default(omit) }}" + assignee: "{{ item.assignee | default(omit) }}" + id: "{{ item.id | default(omit) }}" + scope: "{{ item.scope | default(omit) }}" + register: failures_info + ignore_errors: True + loop: + - name: "{{ az_role_assignments.roleassignments[0].name }}" + assignee: "{{ az_role_assignments.roleassignments[0].principal_id }}" + - assignee: "{{ az_role_assignments.roleassignments[0].principal_id }}" + id: "{{ az_role_assignments.roleassignments[0].id }}" + - name: "{{ az_role_assignments.roleassignments[0].name }}" + id: "{{ az_role_assignments.roleassignments[0].id }}" + +- name: check intended failures + assert: + that: + - item.failed + loop: "{{ failures_info.results }}" + +- name: Intentional failures mutable + azure_rm_roleassignment: + name: "{{ item.name 
| default(omit) }}" + assignee_object_id: "{{ item.assignee | default(omit) }}" + id: "{{ item.id | default(omit) }}" + scope: "{{ item.scope | default(omit) }}" + role_definition_id: "{{ item.role_definition_id | default(omit) }}" + state: "{{ item.state | default(omit) }}" + register: failures_mutable + ignore_errors: True + loop: + # mutually exclusive + - scope: "{{ az_role_assignments.roleassignments[0].scope }}" + id: "{{ az_role_assignments.roleassignments[0].id }}" + # mutually exclusive + - name: "{{ az_role_assignments.roleassignments[0].name }}" + id: "{{ az_role_assignments.roleassignments[0].id }}" + # missing required role_definition_id + - scope: "{{ az_role_assignments.roleassignments[0].scope }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + state: "present" + # missing required assignee_object_id + - scope: "{{ az_role_assignments.roleassignments[0].scope }}" + role_definition_id: "{{ az_role_assignments.roleassignments[0].role_definition_id }}" + state: "present" + # missing required role_definition_id + - scope: "{{ az_role_assignments.roleassignments[0].scope }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + state: "absent" + # missing required assignee_object_id + - scope: "{{ az_role_assignments.roleassignments[0].scope }}" + role_definition_id: "{{ az_role_assignments.roleassignments[0].role_definition_id }}" + state: "absent" + +- name: check intended failures + assert: + that: + - item.failed + loop: "{{ failures_mutable.results }} " + +- name: get resource group info + azure_rm_resourcegroup_info: + name: "{{ resource_group }}" + register: az_resource_group + +- name: create role assignment by id + azure_rm_roleassignment: + id: "{{ az_resource_group.resourcegroups[0].id }}/providers/Microsoft.Authorization/roleAssignments/{{ uuid }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + role_definition_id: "/subscriptions/{{ 
az_resource_group.resourcegroups[0].id.split('/')[2] }}/providers/Microsoft.Authorization/roleDefinitions/{{ az_role_definition_guid }}" + register: az_role_assignment_create + +- name: create role assignment by scope + azure_rm_roleassignment: + scope: "{{ az_resource_group.resourcegroups[0].id }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + role_definition_id: "/subscriptions/{{ az_resource_group.resourcegroups[0].id.split('/')[2] }}/providers/Microsoft.Authorization/roleDefinitions/{{ az_role_definition_guid }}" + register: az_role_assignment_idempotent + +- name: check idempotence + assert: + that: + - az_role_assignment_idempotent.changed == False + +- name: List Role Assignments by Name + azure_rm_roleassignment_info: + name: "{{ az_role_assignment_create.name }}" + scope: "{{ az_role_assignment_create.scope }}" + register: az_role_assignment_by_name + +- name: List Role Assignments at scope + azure_rm_roleassignment_info: + scope: "{{ az_resource_group.resourcegroups[0].id }}" + register: az_role_assignment_by_scope + +- name: List Role Assignments at scope with strict matching + azure_rm_roleassignment_info: + scope: "{{ az_role_assignments.roleassignments[0].scope }}" + strict_scope_match: True + register: az_role_assignment_by_scope_strict + +- name: check strict scope matching + assert: + that: + - item.scope == az_role_assignments.roleassignments[0].scope + loop: "{{ az_role_assignment_by_scope_strict.roleassignments }}" + +- name: List Role Assignments at id + azure_rm_roleassignment_info: + id: "{{ az_role_assignment_create.id }}" + register: az_role_assignment_by_id + +- name: List Role Assignments by assignee + azure_rm_roleassignment_info: + assignee: "{{ az_role_assignments.roleassignments[0].principal_id }}" + register: az_role_assignment_by_assignee + +- name: Delete Role Assignment by id + azure_rm_roleassignment: + id: "{{ az_role_assignment_create.id }}" + state: absent + register: 
az_role_assignment_delete + when: az_role_assignment_create.changed + +- name: create role assignment with name + azure_rm_roleassignment: + scope: "{{ az_resource_group.resourcegroups[0].id }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + role_definition_id: "/subscriptions/{{ az_resource_group.resourcegroups[0].id.split('/')[2] }}/providers/Microsoft.Authorization/roleDefinitions/{{ az_role_definition_guid }}" + name: "{{ uuid }}" + register: az_role_assignment_create + +- name: Delete Role by Name + azure_rm_roleassignment: + scope: "{{ az_resource_group.resourcegroups[0].id }}" + name: "{{ uuid }}" + state: absent + register: az_role_assignment_delete + when: az_role_assignment_create.changed + +- name: create role assignment by scope + azure_rm_roleassignment: + scope: "{{ az_resource_group.resourcegroups[0].id }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + role_definition_id: "/subscriptions/{{ az_resource_group.resourcegroups[0].id.split('/')[2] }}/providers/Microsoft.Authorization/roleDefinitions/{{ az_role_definition_guid }}" + register: az_role_assignment_create_by_scope + +- name: delete by scope, assignee_object_id and role_definition_id + azure_rm_roleassignment: + scope: "{{ az_resource_group.resourcegroups[0].id }}" + assignee_object_id: "{{ az_role_assignments.roleassignments[0].principal_id }}" + role_definition_id: "/subscriptions/{{ az_resource_group.resourcegroups[0].id.split('/')[2] }}/providers/Microsoft.Authorization/roleDefinitions/{{ az_role_definition_guid }}" + state: absent + register: az_role_assignment_delete + when: az_role_assignment_create.changed + +- name: absent assignment that doesn't exist - id + azure.azcollection.azure_rm_roleassignment: + id: "{{ az_role_assignment_delete.id }}" + state: absent + register: absent_nochange_id + +- name: absent assignment that doesn't exist - name + azure.azcollection.azure_rm_roleassignment: + name: "{{ 
az_role_assignment_delete.name }}" + scope: "{{ az_role_assignment_delete.scope }}" + state: absent + register: absent_nochange_name + +- name: absent assignment that doesn't exist - properties + azure.azcollection.azure_rm_roleassignment: + scope: "{{ az_role_assignment_delete.scope }}" + assignee_object_id: "{{ az_role_assignment_delete.assignee_object_id }}" + role_definition_id: "{{ az_role_assignment_delete.role_definition_id }}" + state: absent + register: absent_nochange_properties + +- name: check intended failures info + assert: + that: + - item.changed == false + loop: + - "{{ absent_nochange_properties }}" + - "{{ absent_nochange_id }}" + - "{{ absent_nochange_name }}" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/aliases new file mode 100644 index 000000000..35b940115 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +unsupported \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/tasks/main.yml new file mode 100644 index 000000000..15cf266b6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_roledefinition/tasks/main.yml @@ -0,0 +1,211 @@ 
+- name: Fix resource prefix + set_fact: + role_name: "{{ (resource_group | replace('-','x'))[-8:] }}{{ 1000 | random }}testrole" + subscription_id: "{{azure_subscription_id}}" + principal_id: "{{azure_client_id}}" + run_once: yes + +- name: Create a role definition (Check Mode) + azure_rm_roledefinition: + name: "{{ role_name }}" + scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" + permissions: + - actions: + - "Microsoft.Compute/virtualMachines/read" + not_actions: + - "Microsoft.Compute/virtualMachines/write" + data_actions: + - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read" + not_data_actions: + - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write" + assignable_scopes: + - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" + check_mode: yes + register: output + +- name: Assert creating role definition check mode + assert: + that: + - output.changed + +- name: Create a role definition + azure_rm_roledefinition: + name: "{{ role_name }}" + scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" + permissions: + - actions: + - "Microsoft.Compute/virtualMachines/read" + not_actions: + - "Microsoft.Compute/virtualMachines/write" + data_actions: + - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read" + not_data_actions: + - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write" + assignable_scopes: + - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" + register: output + +- name: Assert creating role definition + assert: + that: + - output.changed + + +## because of the bug of azure service , the following tasks will cause failures randomly +# +#- name: Get facts by type +# azure_rm_roledefinition_info: +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# type: custom +# register: facts +# +#- name: Assert facts +# assert: +# that: +# - 
facts['roledefinitions'] | length > 1 +# +#- name: Get facts by name +# azure_rm_roledefinition_info: +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# role_name: "{{ role_name }}" +# register: facts +# until: facts.roledefinitions | length > 0 +# retries: 50 +# delay: 60 +# +#- name: Assert facts +# assert: +# that: +# - facts['roledefinitions'] | length == 1 +# - facts['roledefinitions'][0]['permissions'] | length == 1 +# - facts['roledefinitions'][0]['permissions'][0]['not_data_actions'] | length == 1 +# - facts['roledefinitions'][0]['permissions'][0]['data_actions'] | length == 1 +# +#- name: Update the role definition (idempotent) +# azure_rm_roledefinition: +# name: "{{ role_name }}" +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# permissions: +# - actions: +# - "Microsoft.Compute/virtualMachines/read" +# not_actions: +# - "Microsoft.Compute/virtualMachines/write" +# data_actions: +# - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read" +# not_data_actions: +# - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write" +# assignable_scopes: +# - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# register: output +# +#- name: assert output not changed +# assert: +# that: +# - not output.changed +# +#- name: Update the role definition +# azure_rm_roledefinition: +# name: "{{ role_name }}" +# description: "update description" +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# permissions: +# - actions: +# - "Microsoft.Compute/virtualMachines/read" +# - "Microsoft.Compute/virtualMachines/start/action" +# not_actions: +# - "Microsoft.Compute/virtualMachines/write" +# data_actions: +# - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read" +# not_data_actions: +# - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write" +# assignable_scopes: +# - 
"/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# register: output +# +#- name: assert output changed +# assert: +# that: +# - output.changed +# +#- name: Get role definition facts +# azure_rm_roledefinition_info: +# role_name: "{{ role_name }}" +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# type: custom +# register: roledef +# until: "{{ roledef.roledefinitions | length > 0 }}" +# retries: 50 +# delay: 60 +# +#- name: Assert role definition facts +# assert: +# that: +# - roledef['roledefinitions'] | length == 1 +# - roledef['roledefinitions'][0]['id'] +# +#- name: Create a role assignment (Check Mode) +# azure_rm_roleassignment: +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# assignee_object_id: "{{ principal_id }}" +# role_definition_id: "{{ roledef['roledefinitions'][0]['id'] }}" +# check_mode: yes +# register: output +# +#- name: Assert creating role definition check mode +# assert: +# that: +# - output.changed +# +#- name: Create a role assignment +# azure_rm_roleassignment: +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# assignee_object_id: "{{ principal_id }}" +# role_definition_id: "{{ roledef['roledefinitions'][0]['id'] }}" +# register: output +# +#- name: Assert creating role assignment +# assert: +# that: +# - output.changed +# +#- name: Get facts +# azure_rm_roleassignment_info: +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# assignee: "{{ principal_id }}" +# role_definition_id: "{{ roledef['roledefinitions'][0]['id'] }}" +# register: facts +# +#- name: assert role assignment facts +# assert: +# that: +# - facts['roleassignments'] | length > 0 +# - facts['roleassignments'][0]['id'] +# +#- name: delete role assignment +# azure_rm_roleassignment: +# name: "{{ facts['roleassignments'][0]['id'].split('/')[-1] }}" +# scope: "/subscriptions/{{ subscription_id }}" +# state: 
absent +# +#- name: Delete the role definition (Check Mode) +# azure_rm_roledefinition: +# name: "{{ role_name }}" +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# state: absent +# check_mode: yes +# register: output +# +#- name: assert deleting role definition check mode +# assert: +# that: output.changed +# +#- name: Delete the role definition +# azure_rm_roledefinition: +# name: "{{ role_name }}" +# scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}" +# state: absent +# register: output +# +#- assert: +# that: +# - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/tasks/main.yml new file mode 100644 index 000000000..98b3c752a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_routetable/tasks/main.yml @@ -0,0 +1,195 @@ +- name: Prepare random number + set_fact: + name: "table{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" 
+ route_name: "route{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create a route table (check mode) + azure_rm_routetable: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + tags: + purpose: testing + check_mode: yes + register: output + +- assert: + that: + - not output.id + - output.changed + +- name: Create a route table + azure_rm_routetable: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + tags: + purpose: testing + register: output + +- assert: + that: + - output.changed + - output.id + +- name: Create a route table (idemponent) + azure_rm_routetable: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + tags: + purpose: testing + register: output + +- assert: + that: + - not output.changed + +- name: Get facts of the table + azure_rm_routetable_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - "output.route_tables | length == 1" + - "output.route_tables[0].routes | length == 0" + +- name: Create route (check mode) + azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + next_hop_type: virtual_network_gateway + address_prefix: "10.1.0.0/16" + route_table_name: "{{ name }}" + check_mode: yes + register: output + +- assert: + that: + - output.changed + - not output.id + +- name: Create route + azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + next_hop_type: virtual_network_gateway + address_prefix: "10.1.0.0/16" + route_table_name: "{{ name }}" + register: output + +- assert: + that: + - output.changed + - output.id + +- name: Create route (idemponent) + azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + next_hop_type: virtual_network_gateway + address_prefix: "10.1.0.0/16" + route_table_name: "{{ name }}" + register: output + +- assert: + that: + - not output.changed + +- name: update route + 
azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + next_hop_type: virtual_network_gateway + address_prefix: "10.1.0.0/24" + route_table_name: "{{ name }}" + register: output + +- assert: + that: + - output.changed + +- name: Get facts of the route + azure_rm_route_info: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + route_table_name: "{{ name }}" + register: output + +- assert: + that: + - output.routes[0].address_prefix == "10.1.0.0/24" + - output.routes[0].next_hop_type == "VirtualNetworkGateway" + +- name: Get facts of the table + azure_rm_routetable_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - "output.route_tables | length == 1" + - "output.route_tables[0].routes | length == 1" + - output.route_tables[0].routes[0].address_prefix == '10.1.0.0/24' + +- name: Delete route (check mode) + azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + route_table_name: "{{ name }}" + state: absent + check_mode: yes + +- name: Delete route + azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + state: absent + route_table_name: "{{ name }}" + register: output + +- assert: + that: + - output.changed + +- name: Delete route (idemponent) + azure_rm_route: + name: "{{ route_name }}" + resource_group: "{{ resource_group }}" + state: absent + route_table_name: "{{ name }}" + register: output + +- assert: + that: + - not output.changed + +- name: Delete route table (check mode) + azure_rm_routetable: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + +- name: Delete route table + azure_rm_routetable: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- assert: + that: + - output.changed + +- name: Delete route table (idemponent) + azure_rm_routetable: + name: "{{ name }}" + resource_group: "{{ 
resource_group }}" + state: absent + register: output + +- assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/aliases new file mode 100644 index 000000000..7f7d9528f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group2 +destructive +azure_rm_securitygroup_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/tasks/main.yml new file mode 100644 index 000000000..dd35db16a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_securitygroup/tasks/main.yml @@ -0,0 +1,377 @@ +- name: Prepare random number + set_fact: + secgroupname: "sg{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + asg_name1: "asg1{{ resource_group | hash('md5') | truncate(7, True, '') }}" + asg_name2: "asg2{{ resource_group | hash('md5') | truncate(7, True, '') }}" + sg_name1: "sgasg{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + tags: + testing: testing + delete: on-exit + 
foo: bar + testkey: testvalue + purge_rules: yes + rules: + - name: DenySSH + protocol: Tcp + destination_port_range: 22 + access: Deny + priority: 100 + direction: Inbound + - name: AllowSSH + protocol: Tcp + source_address_prefix: 174.109.158.0/24 + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + register: output +- name: assert resource created + assert: + that: "{{ output.state.rules | length }} == 2" + +- name: Gather facts by tags + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + tags: + - testing + - foo:bar + - testkey + register: output +- name: assert resource retrieved + assert: + that: + - output.securitygroups | length == 1 + - output.securitygroups[0].default_rules | length > 0 + - output.securitygroups[0].name == '{{ secgroupname }}' + - output.securitygroups[0].network_interfaces | length == 0 + - output.securitygroups[0].rules | length == 2 + - output.securitygroups[0].subnets | length == 0 + +- name: Add/Update rules on existing security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + rules: + - name: AllowSSH + protocol: Tcp + source_address_prefix: 174.108.158.0/24 + destination_port_range: 22 + access: Allow + priority: 101 + - name: AllowSSHFromHome + protocol: Tcp + source_address_prefix: 174.109.158.0/24 + destination_port_range: 22-23 + priority: 102 + - name: AllowHTTPandHTTPS + protocol: Tcp + source_address_prefix: 174.109.158.0/24 + destination_port_range: + - 80 + - 443 + priority: 103 + register: output +- name: assert resource updated + assert: + that: + - "{{ output.state.rules | length }} == 4" + - output.state.rules[0].source_address_prefix == '174.108.158.0/24' + +- name: Gather facts after update + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + register: output +- name: assert rules updated + assert: + that: + - output.securitygroups[0].rules | length == 4 + +- 
name: Test idempotence + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + rules: + - name: AllowSSH + protocol: Tcp + source_address_prefix: 174.108.158.0/24 + destination_port_range: 22 + access: Allow + priority: 101 + - name: AllowSSHFromHome + protocol: Tcp + source_address_prefix: 174.109.158.0/24 + destination_port_range: 22-23 + priority: 102 + - name: AllowHTTPandHTTPS + protocol: Tcp + source_address_prefix: 174.109.158.0/24 + destination_port_range: + - 80 + - 443 + priority: 103 + register: output +- name: assert resource not updated + assert: + that: not output.changed + +- name: Update tags + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + tags: + testing: testing + delete: never + baz: bar + append_tags: false + register: output +- name: assert resource updated + assert: + that: + - output.state.tags | length == 3 + - output.state.tags.delete == 'never' + +- name: Purge tags + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + append_tags: false + tags: + testing: testing + delete: on-exit + register: output +- name: assert resource updated + assert: + that: + - output.state.tags | length == 2 + - output.state.tags.delete == 'on-exit' + +- name: Gather facts for one accounts + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + register: output +- name: assert resource retrieved + assert: + that: + - output.securitygroups | length == 1 + +- name: Gather facts for all accounts + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + tags: + - testing:testing + register: output_groups +- name: assert resource retrieved + assert: + that: + - output_groups.securitygroups | length > 0 + +- name: Create security group with source_address_prefixes + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + tags: + 
testing: testing + delete: on-exit + foo: bar + purge_rules: yes + rules: + - name: AllowSSH + protocol: Tcp + source_address_prefix: + - 52.100.120.240 + - 53.100.250.190 + - 54.110.200.200 + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + register: output +- name: assert resource created + assert: + that: + - "{{ output.state.rules | length }} == 1" + - "{{ output.state.rules[0].source_address_prefixes | length }} == 3" + - not output.state.rules[0].source_address_prefix + +- name: Create security group with source_address_prefixes(idempotent) + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + tags: + testing: testing + delete: on-exit + foo: bar + purge_rules: yes + rules: + - name: AllowSSH + protocol: Tcp + source_address_prefix: + - 52.100.120.240 + - 53.100.250.190 + - 54.110.200.200 + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + register: output +- name: assert resource not updated + assert: + that: not output.changed + +- name: Add a single one group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + tags: + testing: testing + delete: on-exit + foo: bar + rules: + - name: DenySSH + protocol: Tcp + source_address_prefix: + - 54.120.120.240 + destination_port_range: 22 + access: Deny + priority: 102 + direction: Inbound + register: output +- name: assert resource updated + assert: + that: + - output.changed + - "{{ output.state.rules | length }} == 2" + +# Use azure_rm_resource module to create with uppercase protocol name +- name: Create security group with uppercase protocol name + azure_rm_resource: + resource_group: "{{ resource_group }}" + provider: network + resource_type: networkSecurityGroups + resource_name: "{{ secgroupname }}" + api_version: 2022-07-01 + body: + location: "{{ output.state.location }}" + properties: + securityRules: + - name: Upper_Protocal + properties: + protocol: TCP 
# UPPERCASE + access: Allow + sourceAddressPrefix: "*" + sourcePortRange: "*" + destinationAddressPrefix: "*" + destinationPortRange: 80 + priority: 100 + direction: Inbound + +- name: Create security group with capitalized protocol name(idempotent) + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + rules: + - name: Upper_Protocal + protocol: Tcp # Capitalized + access: Allow + destination_port_range: 80 + priority: 100 + direction: Inbound + register: output +- name: assert resource not updated + assert: + that: + - not output.changed + +- name: Create Application security group 1 + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ asg_name1 }}" + tags: + testing: testing + register: asg1 + +- name: Create Application security group 2 + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group_secondary }}" + name: "{{ asg_name2 }}" + tags: + testing: testing + register: asg2 + +- name: Create security group with application security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ sg_name1 }}" + purge_rules: yes + rules: + - name: AsgToAsg + protocol: Tcp + source_application_security_groups: + - "{{ asg1.id }}" + destination_application_security_groups: + - resource_group: "{{ resource_group_secondary }}" + name: "{{ asg_name2 }}" + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + register: output +- name: assert resource retrieved + assert: + that: + - output.changed + +- name: Create security group with application security group - Idempotent + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ sg_name1 }}" + purge_rules: yes + rules: + - name: AsgToAsg + protocol: Tcp + source_application_security_groups: + - "{{ asg_name1 }}" + destination_application_security_groups: + - resource_group: "{{ resource_group_secondary }}" + name: "{{ asg_name2 }}" + 
destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + register: output +- name: assert resource not updated + assert: + that: + - not output.changed + +- name: Delete security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ sg_name1 }}" + state: absent + +- name: Delete security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ secgroupname }}" + state: absent + +- name: Clean up Application security group 2 + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group }}" + name: "{{ asg_name1 }}" + state: absent + +- name: Clean up Application security group 2 + azure_rm_applicationsecuritygroup: + resource_group: "{{ resource_group_secondary }}" + name: "{{ asg_name2 }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/aliases new file mode 100644 index 000000000..cc941b59c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group12 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/tasks/main.yml new file mode 100644 index 000000000..1bc4c11f9 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_servicebus/tasks/main.yml @@ -0,0 +1,181 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create a namespace + azure_rm_servicebus: + name: "ns{{ rpfx }}" + resource_group: "{{ resource_group }}" + sku: premium + tags: + key1: value1 + register: namespace + +- assert: + that: + - namespace.id + - namespace.changed + - namespace.tags + +- name: Create a namespace (idempontent) + azure_rm_servicebus: + name: "ns{{ rpfx }}" + resource_group: "{{ resource_group }}" + register: namespace + +- assert: + that: + - not namespace.changed + +- name: Create a queue + azure_rm_servicebusqueue: + name: "queue{{ rpfx }}" + namespace: "ns{{ rpfx }}" + resource_group: "{{ resource_group }}" + max_message_size_in_kb: 2048 + max_size_in_mb: 2048 + register: queue + +- assert: + that: + - queue.id + - queue.changed + +- name: Create a topic (check mode) + azure_rm_servicebustopic: + name: "topic{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + duplicate_detection_time_in_seconds: 600 + max_message_size_in_kb: 2048 + max_size_in_mb: 2048 + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Create a topic + azure_rm_servicebustopic: + name: "topic{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + duplicate_detection_time_in_seconds: 600 + max_message_size_in_kb: 2048 + max_size_in_mb: 2048 + register: output + +- assert: + that: + - output.changed + - output.id + - "'subscription_count' not in output" + +- name: Create a topic (idempontent) + azure_rm_servicebustopic: + name: "topic{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + duplicate_detection_time_in_seconds: 600 + max_message_size_in_kb: 2048 + max_size_in_mb: 2048 + register: output + +- assert: + that: + - not 
output.changed + +- name: Create test policy + azure_rm_servicebussaspolicy: + name: testpolicy + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + topic: "topic{{ rpfx }}" + rights: manage + +- name: Create a subscription + azure_rm_servicebustopicsubscription: + name: "subs{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + topic: "topic{{ rpfx }}" + register: subs + +- assert: + that: + - subs.id + - subs.changed + +- name: Retrive topic + azure_rm_servicebus_info: + type: topic + name: "topic{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + show_sas_policies: yes + register: facts + +- assert: + that: + - "facts.servicebuses | length == 1" + - facts.servicebuses[0].id == output.id + - facts.servicebuses[0].subscription_count == 1 + - facts.servicebuses[0].sas_policies.testpolicy + - facts.servicebuses[0].sas_policies.testpolicy.rights == 'manage' + +- name: Delete subscription + azure_rm_servicebustopicsubscription: + name: "subs{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + topic: "topic{{ rpfx }}" + state: absent + +- name: Retrive topic + azure_rm_servicebus_info: + type: topic + name: "topic{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + show_sas_policies: yes + register: facts + +- assert: + that: + - facts.servicebuses[0].subscription_count == 0 + - "facts.servicebuses | length == 1" + +- name: Delete topic + azure_rm_servicebustopic: + name: "topic{{ rpfx }}" + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + state: absent + +- name: Retrive topic + azure_rm_servicebus_info: + name: "topic{{ rpfx }}" + type: topic + resource_group: "{{ resource_group }}" + namespace: "ns{{ rpfx }}" + show_sas_policies: yes + register: facts + +- assert: + that: + - "facts.servicebuses | length == 0" + +- name: Delete queue + azure_rm_servicebusqueue: + name: "queue{{ rpfx }}" + resource_group: "{{ 
resource_group }}" + namespace: "ns{{ rpfx }}" + state: absent + +- name: Delete namespace + azure_rm_servicebus: + name: "ns{{ rpfx }}" + resource_group: "{{ resource_group }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/aliases new file mode 100644 index 000000000..9d5ba080a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group9 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/tasks/main.yml new file mode 100644 index 000000000..99c9f80cb --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanagedinstance/tasks/main.yml @@ -0,0 +1,182 @@ +- name: Prepare random number + set_fact: + random_postfix: "sqlmi{{ 1000 | random }}{{ resource_group | hash('md5') | truncate(7, True, '') }}" + +- name: Create virtual network + azure_rm_virtualnetwork: + name: "{{ random_postfix }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + - fdda:e69b:1587:495e::/64 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + resource_group: "{{ resource_group }}" + +- name: Create a route table + azure_rm_routetable: + name: "{{ 
random_postfix }}" + resource_group: "{{ resource_group }}" + tags: + purpose: testing + +- name: Create security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + tags: + testing: testing + delete: on-exit + foo: bar + purge_rules: yes + rules: + - name: DenySSH + protocol: Tcp + destination_port_range: 22 + access: Deny + priority: 100 + direction: Inbound + - name: AllowSSH + protocol: Tcp + source_address_prefix: 174.109.158.0/24 + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + +- name: Add the subnet back + azure_rm_subnet: + name: foobar + virtual_network_name: "{{ random_postfix }}" + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/24" + security_group: + resource_gorup: "{{ resource_group }}" + name: "{{ random_postfix }}" + route_table: + name: "{{ random_postfix }}" + resource_group: "{{ resource_group }}" + delegations: + - name: 'mysqlinstance' + serviceName: "Microsoft.Sql/managedInstances" + register: subnet_output + +- name: Create sql managed instance (Checkmode test) + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + subnet_id: "{{ subnet_output.state.id }}" + identity: + type: SystemAssigned + sku: + name: GP_Gen5 + tier: GeneralPurpose + family: Gen5 + capacity: 8 + administrator_login: azureuser + administrator_login_password: Fredtest@password0329test + storage_size_in_gb: 256 + v_cores: 8 + tags: + key0: value0 + register: output + +- name: Assert the resource instance is not exist + assert: + that: + - output.changed + +- name: Create sql managed instance + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + subnet_id: "{{ subnet_output.state.id }}" + identity: + type: SystemAssigned + sku: + name: GP_Gen5 + tier: GeneralPurpose + family: Gen5 + capacity: 8 + administrator_login: azureuser + administrator_login_password: 
Fredtest@password0329test + storage_size_in_gb: 256 + v_cores: 8 + tags: + key0: value0 + register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create sql managed instance (Idempotent test) + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + subnet_id: "{{ subnet_output.state.id }}" + identity: + type: SystemAssigned + sku: + name: GP_Gen5 + tier: GeneralPurpose + family: Gen5 + capacity: 8 + administrator_login: azureuser + storage_size_in_gb: 256 + v_cores: 8 + tags: + key0: value0 + register: output + +- name: Assert the resource instance no changed + assert: + that: + - not output.changed + +- name: Upgarde sql managed instance with tags + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + subnet_id: "{{ subnet_output.state.id }}" + identity: + type: SystemAssigned + sku: + name: GP_Gen5 + tier: GeneralPurpose + family: Gen5 + capacity: 8 + administrator_login: azureuser + administrator_login_password: Fredtest@password0329test + storage_size_in_gb: 256 + v_cores: 8 + tags: + key0: value0 + key1: value1 + register: output + +- name: Assert the resource instance is update + assert: + that: + - output.changed + +- name: Get SQL managed instance by name + azure_rm_sqlmanagedinstance_info: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.sql_managed_instance[0].tags | length == 1 + - output.sql_managed_instance[0].storage_size_in_gb == 256 + - output.sql_managed_instance[0].sku.name == 'GP_Gen5' + +- name: Delete sql managed instance + azure_rm_sqlmanagedinstance: + resource_group: "{{ resource_group }}" + name: "{{ random_postfix }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/aliases new file mode 100644 index 000000000..952e4dac8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/aliases @@ -0,0 +1,8 @@ +cloud/azure +destructive +shippable/azure/group9 +azure_rm_sqlserver_facts +azure_rm_sqldatabase +azure_rm_sqldatabase_facts +azure_rm_sqlfirewallrule +azure_rm_sqlfirewallrule_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/tasks/main.yml new file mode 100644 index 000000000..e71b620ca --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlserver/tasks/main.yml @@ -0,0 +1,840 @@ +- name: Prepare random number + set_fact: + random_postfix: "{{ 1000 | random }}{{ resource_group | hash('md5') | truncate(7, True, '') }}" + tenant_id: "{{ azure_tenant }}" + run_azuread_tests: false + azuread_group_name: "Test Security Group" + azuread_group_id: "00000000-0000-0000-0000-000000000000" + run_once: yes + +- name: Create instance of SQL Server -- check mode + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv{{ random_postfix }}" + location: eastus + admin_username: mylogin + admin_password: Password123! 
+ check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of SQL Server + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv{{ random_postfix }}" + location: eastus + admin_username: mylogin + admin_password: Password123! + tags: + aaa: bbb + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + +- name: Create again instance of SQL Server + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv{{ random_postfix }}" + location: eastus + admin_username: mylogin + admin_password: Password123! + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.state == 'Ready' + +- name: Create extended instance of SQL Server + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-extended-{{ random_postfix }}" + location: eastus + admin_username: mylogin2 + admin_password: Password123! + minimal_tls_version: '1.2' + public_network_access: Disabled + restrict_outbound_network_access: Enabled + tags: + aaa: bbb + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + +- name: Create extended instance of SQL Server - idempotent + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-extended-{{ random_postfix }}" + location: eastus + admin_username: mylogin2 + admin_password: Password123! 
+ minimal_tls_version: '1.2' + public_network_access: Disabled + restrict_outbound_network_access: Enabled + tags: + aaa: bbb + register: output +- name: Assert the state has not changed + assert: + that: + - not output.changed + - output.state == 'Ready' + +- name: Update SQL admin password + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-extended-{{ random_postfix }}" + location: eastus + admin_password: Password123!321! + change_admin_password: true + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Create instance of SQL Server with Azure AD admin + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-azuread-{{ random_postfix }}" + location: eastus + admin_username: sqllogin + admin_password: Password123! + administrators: + principal_type: Group + login: "{{ azuread_group_name }}" + sid: "{{ azuread_group_id }}" + tenant_id: "{{ tenant_id }}" + azure_ad_only_authentication: false + register: output + when: run_azuread_tests | bool +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.state == 'Ready' + when: run_azuread_tests | bool + +- name: Create instance of SQL Server with Azure AD admin - idempotent + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-azuread-{{ random_postfix }}" + location: eastus + admin_username: sqllogin + admin_password: Password123! 
+ administrators: + principal_type: Group + login: "{{ azuread_group_name }}" + sid: "{{ azuread_group_id }}" + tenant_id: "{{ tenant_id }}" + azure_ad_only_authentication: false + register: output + when: run_azuread_tests | bool +- name: Assert the state has not changed + assert: + that: + - not output.changed + - output.state == 'Ready' + when: run_azuread_tests | bool + +# azure_rm_sqlserver_facts tests + +- name: Gather facts SQL Server + azure_rm_sqlserver_info: + resource_group: "{{ resource_group }}" + server_name: "sqlsrv{{ random_postfix }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers.sqlsrv{{ random_postfix }}.id != None + - output.servers.sqlsrv{{ random_postfix }}.name == "sqlsrv{{ random_postfix }}" + - output.servers.sqlsrv{{ random_postfix }}.type != None + - output.servers.sqlsrv{{ random_postfix }}.location != None + - output.servers.sqlsrv{{ random_postfix }}.kind != None + - output.servers.sqlsrv{{ random_postfix }}.version != None + - output.servers.sqlsrv{{ random_postfix }}.state != None + - output.servers.sqlsrv{{ random_postfix }}.fully_qualified_domain_name != None + - output.servers.sqlsrv{{ random_postfix }}.tags.aaa == 'bbb' + - output.servers.sqlsrv{{ random_postfix }}.admin_username == 'mylogin' + - output.servers.sqlsrv{{ random_postfix }}.administrators == None + +- name: Gather facts for extended SQL Server + azure_rm_sqlserver_info: + resource_group: "{{ resource_group }}" + server_name: "sqlsrv-extended-{{ random_postfix }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers['sqlsrv-extended-{{ random_postfix }}'].id != None + - output.servers['sqlsrv-extended-{{ random_postfix }}'].name == 'sqlsrv-extended-{{ random_postfix }}' + - output.servers['sqlsrv-extended-{{ random_postfix }}'].type != None + - output.servers['sqlsrv-extended-{{ random_postfix }}'].location != None + - 
output.servers['sqlsrv-extended-{{ random_postfix }}'].kind != None + - output.servers['sqlsrv-extended-{{ random_postfix }}'].version != None + - output.servers['sqlsrv-extended-{{ random_postfix }}'].state != None + - output.servers['sqlsrv-extended-{{ random_postfix }}'].fully_qualified_domain_name != None + - output.servers['sqlsrv-extended-{{ random_postfix }}'].tags.aaa == 'bbb' + - output.servers['sqlsrv-extended-{{ random_postfix }}'].minimal_tls_version == '1.2' + - output.servers['sqlsrv-extended-{{ random_postfix }}'].public_network_access == 'Disabled' + - output.servers['sqlsrv-extended-{{ random_postfix }}'].restrict_outbound_network_access == 'Enabled' + - output.servers['sqlsrv-extended-{{ random_postfix }}'].admin_username == 'mylogin2' + - output.servers['sqlsrv-extended-{{ random_postfix }}'].administrators == None + +- name: Gather facts for SQL Server with Azure AD admin + azure_rm_sqlserver_info: + resource_group: "{{ resource_group }}" + server_name: "sqlsrv-azuread-{{ random_postfix }}" + register: output + when: run_azuread_tests | bool +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].id != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].name == 'sqlsrv-azuread-{{ random_postfix }}' + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].type != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].location != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].kind != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].version != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].state != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].fully_qualified_domain_name != None + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].tags | length == 0 + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].admin_username == 'sqllogin' + - output.servers['sqlsrv-azuread-{{ 
random_postfix }}'].administrators.administrator_type == 'ActiveDirectory' + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].administrators.azure_ad_only_authentication == False + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].administrators.login == '{{ azuread_group_name }}' + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].administrators.principal_type == 'Group' + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].administrators.sid == '{{ azuread_group_id }}' + - output.servers['sqlsrv-azuread-{{ random_postfix }}'].administrators.tenant_id == '{{ tenant_id }}' + when: run_azuread_tests | bool + +- name: Gather facts SQL Server - unexisting + azure_rm_sqlserver_info: + resource_group: "{{ resource_group }}" + server_name: "unexisting" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers == {} + +- name: Gather facts SQL Server - list + azure_rm_sqlserver_info: + resource_group: "{{ resource_group }}" + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.servers.sqlsrv{{ random_postfix }}.id != None + - output.servers.sqlsrv{{ random_postfix }}.name == "sqlsrv{{ random_postfix }}" + - output.servers.sqlsrv{{ random_postfix }}.type != None + - output.servers.sqlsrv{{ random_postfix }}.location != None + - output.servers.sqlsrv{{ random_postfix }}.kind != None + - output.servers.sqlsrv{{ random_postfix }}.version != None + - output.servers.sqlsrv{{ random_postfix }}.state != None + - output.servers.sqlsrv{{ random_postfix }}.fully_qualified_domain_name != None + +# azure_rm_sqldatabase tests + +- name: Create instance of SQL Database -- check mode + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + location: eastus + check_mode: yes + register: output +- name: Assert the resource instance is well created + 
assert: + that: + - output.changed + +- name: Create instance of SQL Database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + location: eastus + edition: premium + tags: + aaa: bbb + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + - output.status == 'Online' + +- name: Create again instance of SQL Database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + location: eastus + edition: premium + tags: + aaa: bbb + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.status == 'Online' + +# test database point in time restore +- name: Gather facts SQL Database and wait for restore point + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + register: output + until: output.databases[0].earliest_restore_date != None + retries: 10 + delay: 20 +- name: Assert that it can be restored from + assert: + that: + - output.databases[0].id != None + - output.databases[0].earliest_restore_date != None + +- name: Create second SQL Database, restoring from the previous Database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + create_mode: point_in_time_restore + restore_point_in_time: "{{ output.databases[0].earliest_restore_date }}" + source_database_id: "{{ output.databases[0].id }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}PITR + location: eastus + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of SQL Database Point in time recovery + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: 
database{{ random_postfix }}PITR + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +# test database facter: +- name: Create second SQL Database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}second + location: eastus + +- name: Gather facts SQL Database + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0].id != None + - output.databases[0].name != None + - output.databases[0].location != None + - output.databases[0].sku.name != None + - output.databases[0].sku.tier != None + - output.databases[0].sku.capacity != None + - output.databases[0].kind != None + - output.databases[0].status != None + +- name: Gather facts SQL Database + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0].id != None + - output.databases[0].name != None + - output.databases[0].location != None + - output.databases[0].sku.name != None + - output.databases[0].sku.tier != None + - output.databases[0].sku.capacity != None + - output.databases[0].kind != None + - output.databases[0].status != None + - output.databases[1].id != None + - output.databases[1].name != None + - output.databases[1].location != None + - output.databases[1].sku.name != None + - output.databases[1].sku.tier != None + - output.databases[1].sku.capacity != None + - output.databases[1].kind != None + - output.databases[1].status != None + +- name: Delete instance of secondary database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: 
sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}second + state: absent + +# clean up databases +- name: Delete instance of SQL Database -- check mode + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of SQL Database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of SQL Database + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }} + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +# Test With SKU +- name: Create SQL Database with sku -- check mode + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + location: eastus + sku: + name: S0 + tier: Standard + check_mode: yes + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed + +- name: Create SQL Database with sku + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + location: eastus + tags: + aaa: bbb + sku: + name: S0 + tier: Standard + register: output +- name: Assert the resource instance is well created with good SKU + assert: + that: + - output.changed + - output.status == 'Online' + +- name: Gather facts SQL Database with good SKU + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ 
random_postfix }} + name: database{{ random_postfix }}2 + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0].id != None + - output.databases[0].name != None + - output.databases[0].location != None + - output.databases[0].sku.name == "S0" + - output.databases[0].sku.tier == "Standard" + - output.databases[0].sku.capacity != None + - output.databases[0].kind != None + - output.databases[0].status != None + +- name: Create again instance of SQL Database with same SKU + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + location: eastus + tags: + aaa: bbb + sku: + name: S0 + tier: Standard + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + - output.status == 'Online' + +- name: Create again instance of SQL Database with New SKU + azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + location: eastus + tags: + aaa: bbb + sku: + name: P1 + tier: Premium + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed + - output.status == 'Online' + +- name: Gather facts SQL Database with good New SKU + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.databases[0].id != None + - output.databases[0].name != None + - output.databases[0].location != None + - output.databases[0].sku.name == "P1" + - output.databases[0].sku.tier == "Premium" + - output.databases[0].sku.capacity != None + - output.databases[0].kind != None + - output.databases[0].status != None + +- name: Delete instance of SQL Database + 
azure_rm_sqldatabase: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +# test database facts without databases +- name: Gather facts SQL Database + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: database{{ random_postfix }}2 + register: output +- name: Assert that empty dictionary was returned + assert: + that: + - output.changed == False + - output.databases | length == 0 + +- name: Gather facts SQL Database + azure_rm_sqldatabase_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + register: output +- name: Assert that empty dictionary was returned (one database is there by default) + assert: + that: + - output.changed == False + - output.databases | length == 1 + +# azure_rm_sqlfirewallrule tests + +- name: Create instance of Firewall Rule -- check mode + azure_rm_sqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: "sqlsrv{{ random_postfix }}" + name: firewallrule{{ random_postfix }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of Firewall Rule + azure_rm_sqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: "sqlsrv{{ random_postfix }}" + name: firewallrule{{ random_postfix }} + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create again instance of Firewall Rule + azure_rm_sqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: "sqlsrv{{ random_postfix }}" + name: firewallrule{{ random_postfix }} + 
start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + register: output +- name: Assert the state has not changed + assert: + that: + - output.changed == false + +# +# azure_rm_sqlserverfirewallrule_facts +# + +- name: Create Firewall Rule - second + azure_rm_sqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: firewallrule{{ random_postfix }}second + start_ip_address: 172.28.10.136 + end_ip_address: 172.28.10.138 + +- name: Gather facts SQL Firewall Rule + azure_rm_sqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: firewallrule{{ random_postfix }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].resource_group != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + +- name: Gather facts SQL Firewall Rule + azure_rm_sqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + register: output +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.rules[0].id != None + - output.rules[0].id != None + - output.rules[0].resource_group != None + - output.rules[0].server_name != None + - output.rules[0].name != None + - output.rules[0].start_ip_address != None + - output.rules[0].end_ip_address != None + - output.rules[1].id != None + - output.rules[1].resource_group != None + - output.rules[1].server_name != None + - output.rules[1].name != None + - output.rules[1].start_ip_address != None + - output.rules[1].end_ip_address != None + +- name: Delete instance of Firewall Rule + azure_rm_sqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: firewallrule{{ random_postfix }} + state: absent 
+- name: Delete instance of Firewall Rule + azure_rm_sqlfirewallrule: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: firewallrule{{ random_postfix }}second + state: absent + +- name: Gather facts SQL Firewall Rule + azure_rm_sqlfirewallrule_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: firewallrule{{ random_postfix }} + register: output +- name: Assert that empty dictionary was returned + assert: + that: + - output.changed == False + - output.rules | length == 0 + +# azure_rm_sqlelasticpool test + +- name: Create instance of SQL Elastic Pool -- check mode + azure_rm_sqlelasticpool: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: EP{{ random_postfix }} + zone_redundant: False + tags: + aaa: bbb + check_mode: yes + register: output +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of SQL Elastic Pool + azure_rm_sqlelasticpool: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: EP{{ random_postfix }} + zone_redundant: False + tags: + aaa: bbb + register: output + +- name: Assert the resource instance is well created + assert: + that: + - output.changed + +- name: Create instance of SQL Elastic Pool -- Idempotent test + azure_rm_sqlelasticpool: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: EP{{ random_postfix }} + zone_redundant: False + tags: + aaa: bbb + register: output + +- name: Assert the resource instance is well created + assert: + that: + - not output.changed + +- name: Update instance of SQL Elastic Pool + azure_rm_sqlelasticpool: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: EP{{ random_postfix }} + zone_redundant: True + tags: + aaa1: bbb1 + register: output + +- name: Assert the resource instance is well created + assert: + 
that: + - output.changed + +- name: Gather facts SQL Database + azure_rm_sqlelasticpool_info: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: EP{{ random_postfix }} + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.elastic_pool[0].zone_redundant == true + - output.elastic_pool[0].tags | length == 2 + +- name: Delete instance of SQL Elastic Pool + azure_rm_sqlelasticpool: + resource_group: "{{ resource_group }}" + server_name: sqlsrv{{ random_postfix }} + name: EP{{ random_postfix }} + state: absent + register: output + +- name: Assert the resource instance is deleted + assert: + that: + - output.changed + +# finalise & clean up azure_rm_sqlserver test + +- name: Delete instance of SQL Server -- check mode + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv{{ random_postfix }}" + state: absent + check_mode: yes + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete instance of SQL Server + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv{{ random_postfix }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed + +- name: Delete unexisting instance of SQL Server + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv{{ random_postfix }}" + state: absent + register: output +- name: Assert the state has changed + assert: + that: + - output.changed == false + +- name: Delete extended instance of SQL Server + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-extended-{{ random_postfix }}" + state: absent + +- name: Delete instance of SQL Server with Azure AD admin + azure_rm_sqlserver: + resource_group: "{{ resource_group }}" + name: "sqlsrv-azuread-{{ random_postfix }}" + state: absent + when: run_azuread_tests | bool diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/tasks/main.yml new file mode 100644 index 000000000..8c738a627 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccount/tasks/main.yml @@ -0,0 +1,592 @@ +- name: Set Storage Account Names + set_fact: + storage_account_name_default: "sa{{ resource_group | hash('md5') | truncate(20, True, '') }}" + storage_account_name_explicit: "sa{{ resource_group | hash('sha1') | truncate(20, True, '') }}" + +- name: Test invalid account name + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "invalid_char$" + account_type: Standard_LRS + register: output + ignore_errors: true +- name: Check intentional name failure. 
+ assert: + that: + - output.failed + - output.msg is regex('AccountNameInvalid') + +- name: Delete storage accounts to prepare fresh deployment + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ item }}" + state: absent + force_delete_nonempty: true + loop: + - "{{ storage_account_name_default }}" + - "{{ storage_account_name_explicit }}" + - "{{ storage_account_name_default }}01" + - "{{ storage_account_name_default }}02" + - "{{ storage_account_name_default }}03" + - "{{ storage_account_name_default }}04" + - "{{ storage_account_name_default }}06" + +- name: Create new storage account with defaults (omitted parameters) + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}" + account_type: Standard_LRS + register: defaults_output +- name: Assert status succeeded and results match expectations + assert: + that: + - defaults_output.changed + - defaults_output.state.name == storage_account_name_default + - defaults_output.state.id is defined + - defaults_output.state.https_only + - defaults_output.state.access_tier == None + - defaults_output.state.allow_blob_public_access == true + - defaults_output.state.minimum_tls_version == "TLS1_0" + +- name: Create storage account with static website disabled + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}01" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: false + register: output +- name: Assert output + assert: + that: + - output.changed + - output.state.static_website is defined + - not output.state.static_website.enabled + - output.state.static_website.index_document == None + - output.state.static_website.error_document404_path == None + +- name: Create storage account with static website disabled (idempotency test) + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}01" + account_type: 
Standard_LRS + kind: StorageV2 + static_website: + enabled: false + register: output +- name: Assert not changed + assert: + that: + - not output.changed + +- name: Enable storage account static website + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}01" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: true + register: output +- name: Assert output + assert: + that: + - output.changed + - output.state.static_website is defined + - output.state.static_website.enabled + - output.state.static_website.index_document == None + - output.state.static_website.error_document404_path == None + +- name: Configure additional storage account static website properties + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}01" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: true + index_document: "index.html" + error_document404_path: "error.html" + register: output +- name: Assert output + assert: + that: + - output.changed + - output.state.static_website is defined + - output.state.static_website.enabled + - output.state.static_website.index_document == 'index.html' + - output.state.static_website.error_document404_path == 'error.html' + +- name: Configure additional storage account static website properties (idempotency test) + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}01" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: true + index_document: "index.html" + error_document404_path: "error.html" + register: output +- name: Assert not changed + assert: + that: + - not output.changed + +- name: Create new storage account with Hierarchical Namespace enabled + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}06" + account_type: Standard_LRS + kind: StorageV2 + 
is_hns_enabled: true + register: output +- name: Assert output + assert: + that: + - output.changed + +- name: Gather facts of storage account + azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}06" + register: output +- assert: + that: + - "output.storageaccounts | length == 1" + - output.storageaccounts[0].is_hns_enabled == true + +- name: Create storage account with static website enabled + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}04" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: true + index_document: "abc.htm" + register: output +- name: Assert output + assert: + that: + - output.changed + - output.state.static_website is defined + - output.state.static_website.enabled + - output.state.static_website.index_document == "abc.htm" + - output.state.static_website.error_document404_path == None + +- name: Create storage account with static website enabled (idempotency test) + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}04" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: true + index_document: "abc.htm" + register: output +- name: Assert not changed + assert: + that: + - not output.changed + +- name: Disable storage account static website + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}04" + account_type: Standard_LRS + kind: StorageV2 + static_website: + enabled: false + register: output +- name: Assert output + assert: + that: + - output.changed + - output.state.static_website is defined + - not output.state.static_website.enabled + - output.state.static_website.index_document == None + - output.state.static_website.error_document404_path == None + +- name: Create new storage account with I(kind=FileStorage) + azure_rm_storageaccount: + resource_group: 
"{{ resource_group }}" + name: "{{ storage_account_name_default }}02" + account_type: Premium_ZRS + kind: FileStorage + register: filestorage_output +- name: Assert status succeeded and results match I(kind=FileStorage) + assert: + that: + - filestorage_output.changed + - filestorage_output.state.sku_name == "Premium_ZRS" + +- name: Create new storage account with explicit parameters + azure_rm_storageaccount: + access_tier: Hot + account_type: Premium_LRS + allow_blob_public_access: False + append_tags: false + blob_cors: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - x-ms-meta-data* + - x-ms-meta-target* + - x-ms-meta-abc + exposed_headers: + - x-ms-meta-* + max_age_in_seconds: 200 + https_only: False + kind: StorageV2 + location: eastus + minimum_tls_version: 'TLS1_2' + name: "{{ storage_account_name_explicit }}" + network_acls: + bypass: AzureServices + default_action: Deny + ip_rules: + - value: '9.9.9.9' + action: Allow + resource_group: "{{ resource_group }}" + tags: + test: test + galaxy: galaxy + register: explicit_output +- name: Assert status succeeded and correct parameter results + assert: + that: + - explicit_output.changed + - explicit_output.state.id is defined + - explicit_output.state.blob_cors | length == 1 + - not explicit_output.state.https_only + - not explicit_output.state.allow_blob_public_access + - explicit_output.state.minimum_tls_version == 'TLS1_2' + - explicit_output.state.network_acls.bypass == "AzureServices" + - explicit_output.state.network_acls.default_action == "Deny" + - explicit_output.state.network_acls.ip_rules | length == 1 + +- name: Update existing storage account (idempotence) + azure_rm_storageaccount: + access_tier: Hot + account_type: Premium_LRS + allow_blob_public_access: False + append_tags: false + blob_cors: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - x-ms-meta-data* + - x-ms-meta-target* + 
- x-ms-meta-abc + exposed_headers: + - x-ms-meta-* + max_age_in_seconds: 200 + https_only: False + kind: StorageV2 + location: eastus + minimum_tls_version: 'TLS1_2' + name: "{{ storage_account_name_explicit }}" + network_acls: + bypass: AzureServices + default_action: Deny + ip_rules: + - value: '9.9.9.9' + action: Allow + resource_group: "{{ resource_group }}" + tags: + test: test + galaxy: galaxy + register: output +- name: Assert that properties have not changed + assert: + that: + - not output.changed + - output.state.access_tier == explicit_output.state.access_tier + - output.state.allow_blob_public_access == explicit_output.state.allow_blob_public_access + - output.state.blob_cors == explicit_output.state.blob_cors + - output.state.custom_domain == explicit_output.state.custom_domain + - output.state.https_only == explicit_output.state.https_only + - output.state.id == explicit_output.state.id + - output.state.location == explicit_output.state.location + - output.state.minimum_tls_version == explicit_output.state.minimum_tls_version + - output.state.name == explicit_output.state.name + - output.state.network_acls == explicit_output.state.network_acls + - output.state.primary_endpoints == explicit_output.state.primary_endpoints + - output.state.primary_location == explicit_output.state.primary_location + - output.state.secondary_endpoints == explicit_output.state.secondary_endpoints + - output.state.secondary_location == explicit_output.state.secondary_location + - output.state.sku_name == explicit_output.state.sku_name + - output.state.sku_tier == explicit_output.state.sku_tier + - output.state.tags == explicit_output.state.tags + +- name: Update existing storage account with parameters omitted + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_explicit }}" + register: output +- name: Assert that properties have not changed + assert: + that: + - not output.changed + - output.state.access_tier == 
explicit_output.state.access_tier + - output.state.allow_blob_public_access == explicit_output.state.allow_blob_public_access + - output.state.blob_cors == explicit_output.state.blob_cors + - output.state.custom_domain == explicit_output.state.custom_domain + - output.state.https_only == explicit_output.state.https_only + - output.state.id == explicit_output.state.id + - output.state.location == explicit_output.state.location + - output.state.minimum_tls_version == explicit_output.state.minimum_tls_version + - output.state.name == explicit_output.state.name + - output.state.network_acls == explicit_output.state.network_acls + - output.state.primary_endpoints == explicit_output.state.primary_endpoints + - output.state.primary_location == explicit_output.state.primary_location + - output.state.secondary_endpoints == explicit_output.state.secondary_endpoints + - output.state.secondary_location == explicit_output.state.secondary_location + - output.state.sku_name == explicit_output.state.sku_name + - output.state.sku_tier == explicit_output.state.sku_tier + - output.state.tags == explicit_output.state.tags + +- name: Update existing storage account with parameters defined + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}" + allow_blob_public_access: False + append_tags: false + blob_cors: + - allowed_origins: + - http://www.example.com/ + allowed_methods: + - GET + - POST + allowed_headers: + - x-ms-meta-data* + - x-ms-meta-target* + - x-ms-meta-abc + exposed_headers: + - x-ms-meta-* + max_age_in_seconds: 200 + https_only: False + kind: StorageV2 + minimum_tls_version: 'TLS1_1' + network_acls: + bypass: AzureServices + default_action: Deny + ip_rules: + - value: '9.9.9.9' + action: Allow + tags: + test: test + galaxy: galaxy + register: output +- name: Assert account change success + assert: + that: + - output.changed + - output.state.allow_blob_public_access == False + - 
output.state.allow_blob_public_access != None + - output.state.https_only == False + - output.state.https_only != None + - output.state.minimum_tls_version == 'TLS1_1' + - output.state.name == storage_account_name_default + - output.state.tags == explicit_output.state.tags + # These tests should be valid, but is currently broken due to 'output' not containing blob_cors and network_acls.ip_rules + # - output.state.blob_cors == explicit_output.state.blob_cors + # - output.state.network_acls == explicit_output.state.network_acls + +- name: Change existing account type (invalid) + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}" + account_type: Premium_LRS + register: output + ignore_errors: true +- name: Assert account type change failed + assert: + that: + - output.failed + - output.msg is regex('Storage account of type .* cannot be changed') + +- name: Unverified custom domain failure + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}" + custom_domain: + name: ansible.com + use_sub_domain: false + ignore_errors: true + register: output +- name: Assert CNAME failure + assert: + that: + - output.failed + - output.msg is regex('custom domain name could not be verified') + +- name: Create storage account with no public access + azure_rm_storageaccount: + name: "{{ storage_account_name_default }}03" + resource_group: "{{ resource_group }}" + account_type: Standard_LRS + https_only: true + minimum_tls_version: 'TLS1_2' + allow_blob_public_access: false + public_network_access: 'Disabled' + register: output +- name: Assert desired account config + assert: + that: + - output.changed + - output.state.https_only + - output.state.minimum_tls_version == 'TLS1_2' + - not output.state.allow_blob_public_access + - output.state.public_network_access == 'Disabled' + +- name: Create storage account with no public access (idempotent) + azure_rm_storageaccount: + 
name: "{{ storage_account_name_default }}03" + resource_group: "{{ resource_group }}" + account_type: Standard_LRS + https_only: true + minimum_tls_version: 'TLS1_2' + allow_blob_public_access: false + public_network_access: 'Disabled' + register: output +- name: Assert no change + assert: + that: + - not output.changed + +- name: Gather facts by tags + azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + tags: + - test + - galaxy + register: output +- assert: + that: output.storageaccounts | length >= 1 + +- name: Update account tags + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_explicit }}" + append_tags: false + tags: + testing: testing + delete: never + register: output +- assert: + that: + - "output.state.tags | length == 2" + - "output.state.tags.testing == 'testing'" + - "output.state.tags.delete == 'never'" + +- name: Gather facts connection string and blob_cors + azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_explicit }}" + show_connection_string: True + show_blob_cors: True + register: output +- assert: + that: + - "output.storageaccounts | length == 1" + - not output.storageaccounts[0].custom_domain + - output.storageaccounts[0].account_type == "Premium_LRS" + - output.storageaccounts[0].primary_endpoints.blob.connectionstring + - output.storageaccounts[0].blob_cors + - output.storageaccounts[0].minimum_tls_version == "TLS1_2" + - not output.storageaccounts[0].allow_blob_public_access + - not output.storageaccounts[0].https_only + - output.storageaccounts[0].network_acls.bypass == "AzureServices" + - output.storageaccounts[0].network_acls.default_action == "Deny" + - output.storageaccounts[0].network_acls.ip_rules | length == 1 + +- name: Gather enabled static website properties + azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}01" + register: output +- 
assert: + that: + - output.storageaccounts | length == 1 + - output.storageaccounts[0].static_website is defined + - output.storageaccounts[0].static_website.enabled + - output.storageaccounts[0].static_website.index_document == 'index.html' + - output.storageaccounts[0].static_website.error_document404_path == 'error.html' + +- name: Gather disabled static website properties + azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}04" + register: output +- assert: + that: + - output.storageaccounts | length == 1 + - output.storageaccounts[0].static_website is defined + - not output.storageaccounts[0].static_website.enabled + - output.storageaccounts[0].static_website.index_document == None + - output.storageaccounts[0].static_website.error_document404_path == None + +- name: Create new storage account with (require_infrastructure_encryption=false) + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}05" + account_type: Standard_RAGRS + encryption: + services: + blob: + enabled: true + file: + enabled: true + require_infrastructure_encryption: false + key_source: Microsoft.Storage + register: encryption_output + +- name: Assert storage account with (require_infrastructure_encryption=false) created + assert: + that: + - encryption_output.changed + +- name: Get account with (require_infrastructure_encryption=false) + azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + name: "{{ storage_account_name_default }}05" + register: encryption_output + +- assert: + that: + - encryption_output.storageaccounts | length == 1 + - encryption_output.storageaccounts[0].encryption.key_source == 'Microsoft.Storage' + - not encryption_output.storageaccounts[0].encryption.require_infrastructure_encryption + - encryption_output.storageaccounts[0].encryption.services | length == 2 + +- name: List storage accounts by resource group.
+ azure_rm_storageaccount_info: + resource_group: "{{ resource_group }}" + register: output +- assert: + that: + - "output.storageaccounts | length >= 2" + +- name: Delete storage accounts + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ item }}" + state: absent + force_delete_nonempty: True + loop: + - "{{ storage_account_name_default }}" + - "{{ storage_account_name_explicit }}" + - "{{ storage_account_name_default }}01" + - "{{ storage_account_name_default }}02" + - "{{ storage_account_name_default }}03" + - "{{ storage_account_name_default }}04" + - "{{ storage_account_name_default }}05" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/files/Ratings.png b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/files/Ratings.png new file mode 100644 index 000000000..8dd3e3dbc Binary files /dev/null and b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/files/Ratings.png differ diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml new file mode 100644 index 000000000..8ad07331f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml @@ -0,0 +1,110 @@ +- name: Create storage account name + set_fact: + storage_account: "sb{{ resource_group | hash('md5') | truncate(22, True, '') }}" + test1_file: "./targets/azure_rm_storageblob/files/Ratings.png" + +- name: Create storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + account_type: Standard_LRS + +- name: Create container + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + +- name: Force upload blob + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + blob: 'Ratings.png' + src: '{{ test1_file }}' + content_type: image/png + tags: + val1: foo + val2: bar + force: true + +- name: Upload blob idempotence + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + blob: 'Ratings.png' + src: '{{ test1_file }}' + content_type: image/png + tags: + val1: foo + val2: bar + register: upload_facts +- assert: + that: "not upload_facts.changed" + +- name: Download file idempotence + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + blob: 'Ratings.png' + dest: '{{ test1_file }}' + register: download_results +- assert: + that: not download_results.changed + +- file: path="/tmp/Ratings.png" state=absent + +- name: Download file + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ 
storage_account }}" + container_name: my-blobs + blob: 'Ratings.png' + dest: '/tmp/Ratings.png' + register: download_results +- assert: + that: "download_results.changed" + +- find: paths='/tmp' patterns="Ratings.png" + register: find_results +- assert: + that: "find_results['matched'] == 1" + +- name: Do not delete container that has blobs + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + state: absent + register: output +- assert: + that: "not output.changed" + +- name: Delete blob object + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + blob: "Ratings.png" + state: absent + register: output +- assert: + that: "output.changed" + +- name: Delete container + azure_rm_storageblob: + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + container_name: my-blobs + state: absent + register: output +- assert: + that: "output.changed" + +- name: Delete storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/tasks/main.yml new file mode 100644 index 000000000..ffe672d4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageshare/tasks/main.yml @@ -0,0 +1,126 @@ +--- +- name: Set storage account name + set_fact: + storage_account: "sb{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + account_type: Standard_LRS + kind: StorageV2 + +- name: Set storage share facts + set_fact: + share_name: testshare + quota: 32 + access_tier: Cool + metadata: + source: ansible + purpose: test + +- name: Create share + azure_rm_storageshare: + name: "{{ share_name }}" + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + quota: "{{ quota }}" + metadata: "{{ metadata }}" + access_tier: "{{ access_tier }}" + register: create_result + +- name: Assert create success + assert: + that: + - create_result.changed + - create_result.state.name == share_name + - create_result.state.share_quota == quota + - create_result.state.metadata.source == metadata.source + - create_result.state.metadata.purpose == metadata.purpose + - create_result.state.access_tier == access_tier + - create_result.state.id is defined + - create_result.state.etag is defined + +- name: Run again to check idempotence + azure_rm_storageshare: + name: "{{ share_name }}" + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + register: create_result + +- name: Assert idempotence + assert: + that: not create_result.changed + +- name: Get 
share details + azure_rm_storageshare_info: + name: "{{ share_name }}" + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + register: share_facts + +- name: Assert storage share details + assert: + that: + - not share_facts.changed + - share_facts.storageshares.name == share_name + - share_facts.storageshares.share_quota == quota + - share_facts.storageshares.metadata.source == metadata.source + - share_facts.storageshares.metadata.purpose == metadata.purpose + - share_facts.storageshares.access_tier == access_tier + - share_facts.storageshares.id is defined + - share_facts.storageshares.etag is defined + +- name: Set new storage share facts + set_fact: + quota: 64 + access_tier: Hot + +- name: Update share + azure_rm_storageshare: + name: "{{ share_name }}" + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + quota: "{{ quota }}" + access_tier: "{{ access_tier }}" + register: update_result + +- name: Assert share update success + assert: + that: update_result.changed + +- name: Get updated details + azure_rm_storageshare_info: + name: "{{ share_name }}" + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + register: share_facts + +- name: Assert storage share details + assert: + that: + - share_facts.storageshares.name == share_name + - share_facts.storageshares.share_quota == quota + - share_facts.storageshares.metadata.source== metadata.source + - share_facts.storageshares.metadata.purpose == metadata.purpose + - share_facts.storageshares.access_tier == access_tier + - share_facts.storageshares.id is defined + - share_facts.storageshares.etag is defined + +- name: Delete share + azure_rm_storageshare: + name: "{{ share_name }}" + resource_group: "{{ resource_group }}" + account_name: "{{ storage_account }}" + state: absent + register: delete_output + +- name: Pause for 3 minutes to waiting delete + pause: + minutes: 3 + +- name: Delete storage account + 
azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/tasks/main.yml new file mode 100644 index 000000000..6e4e6cbb1 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subnet/tasks/main.yml @@ -0,0 +1,320 @@ +- name: Create virtual network + azure_rm_virtualnetwork: + name: My_Virtual_Network + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + - fdda:e69b:1587:495e::/64 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + +- name: Create route table + azure_rm_routetable: + name: routetableforsubnet + resource_group: "{{ resource_group }}" + register: route_table + +- name: Remove subnet + azure_rm_subnet: + state: absent + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + +- name: Catch invalid cidr + azure_rm_subnet: + name: 
foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0/24" + register: output + ignore_errors: yes + +- assert: + that: output.failed + +- name: Add the subnet back + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/24" + register: output + +- assert: + that: + - output.changed + - output.state.address_prefix == "10.1.0.0/24" + +- name: Add the subnet back (idempontent) + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: not output.changed + +- name: Create network security group + azure_rm_securitygroup: + name: secgroupfoo + resource_group: "{{ resource_group }}" + tags: + testing: testing + +- name: Update the subnet + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + security_group: secgroupfoo + service_endpoints: + - service: Microsoft.Sql + locations: + - eastus + - westus + +- name: Should be idempotent + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + service_endpoints: + - service: Microsoft.Sql + locations: + - eastus + - westus + register: output + +- assert: + that: not output.changed + +- name: Able to completely remove service endpoints + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + service_endpoints: [] + register: output + +- assert: + that: output.state.service_endpoints is not defined + +- name: Create network security group in another resource group + azure_rm_securitygroup: + name: secgroupfoo + resource_group: "{{ resource_group_secondary }}" + register: nsg + 
+- name: Update the subnet + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + route_table: "{{ route_table.id }}" + security_group: + name: secgroupfoo + resource_group: "{{ resource_group_secondary }}" + register: output + +- assert: + that: + - output.changed + - output.state.network_security_group.id == nsg.state.id + +- name: Update the subnet (idempotent) + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefix_cidr: "10.1.0.0/16" + route_table: "{{ route_table.id }}" + security_group: "{{ nsg.state.id }}" + register: output + +- assert: + that: not output.changed + +- name: Create subnet with IPv4 and IPv6 + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - "172.100.0.0/16" + - "fdda:e69b:1587:495e::/64" + register: output + +- assert: + that: + - output.changed + - not output.state.address_prefix + - output.state.address_prefixes + +- name: Update the subnet to IPv4 and IPv6 (idempotent) + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - "172.100.0.0/16" + - "fdda:e69b:1587:495e::/64" + register: output + +- assert: + that: not output.changed + +- name: Update the subnet's IPv4 and IPv6 address + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - "172.100.0.0/24" + - "fdda:e69b:1587:495e::/64" + security_group: "{{ nsg.state.id }}" + register: output + +- assert: + that: + - output.changed + +- name: Update the subnet with network policies + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + 
private_link_service_network_policies: Disabled + private_endpoint_network_policies: Enabled + register: output + +- assert: + that: output + +- name: The subnet with network policies should be idempotent + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + private_link_service_network_policies: Disabled + private_endpoint_network_policies: Enabled + register: output + +- assert: + that: not output.changed + +- name: Update the subnet with delegations + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + delegations: + - name: 'mydeleg' + serviceName: 'Microsoft.ContainerInstance/containerGroups' + register: output + +- assert: + that: output + +- name: The subnet with delegations should be idempotent + azure_rm_subnet: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + delegations: + - name: 'mydeleg' + serviceName: 'Microsoft.ContainerInstance/containerGroups' + register: output + +- assert: + that: not output.changed + +- name: Get subnet facts + azure_rm_subnet_info: + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.subnets[0]['id'] != None + - output.subnets[0]['resource_group'] != None + - output.subnets[0]['virtual_network_name'] != None + - output.subnets[0]['name'] != None + - not output.subnets[0]['address_prefix_cidr'] + - output.subnets[0]['address_prefixes_cidr'] != None + - output.subnets[0]['security_group'] != None + - output.subnets[0]['provisioning_state'] != None + - output.subnets[0]['private_endpoint_network_policies'] != None + - output.subnets[0]['private_link_service_network_policies'] != None + - output.subnets[0]['delegations'] != None + +- name: Get subnet facts + azure_rm_subnet_info: + 
name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + register: output + +- name: Assert that facts are returned + assert: + that: + - output.changed == False + - output.subnets[0]['id'] != None + - output.subnets[0]['resource_group'] != None + - output.subnets[0]['virtual_network_name'] != None + - output.subnets[0]['name'] != None + - output.subnets[0]['route_table'] != None + - output.subnets[0]['address_prefix_cidr'] != None + - output.subnets[0]['security_group'] != None + - output.subnets[0]['provisioning_state'] != None + +- name: Remove subnet + azure_rm_subnet: + state: absent + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + +- name: Remove subnet (idempotent) + azure_rm_subnet: + state: absent + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: not output.changed + +- name: Remove subnet + azure_rm_subnet: + state: absent + name: foobar01 + virtual_network_name: My_Virtual_Network + resource_group: "{{ resource_group }}" + +- name: Remove security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: secgroupfoo + state: absent + +- name: Remove virtual network + azure_rm_virtualnetwork: + name: My_Virtual_Network + resource_group: "{{ resource_group }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/meta/main.yml 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/tasks/main.yml new file mode 100644 index 000000000..113cfe546 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_subscription/tasks/main.yml @@ -0,0 +1,24 @@ +- name: Get list of all subscriptions + azure_rm_subscription_info: + all: True + register: az_all_subscriptions + +- name: Get a subscription by id + azure_rm_subscription_info: + id: "{{ az_all_subscriptions.subscriptions[0].subscription_id }}" + +- name: Get a subscription by name + azure_rm_subscription_info: + name: "{{ az_all_subscriptions.subscriptions[0].display_name }}" + +- name: Test invalid name id combo + azure_rm_subscription_info: + name: "{{ az_all_subscriptions.subscriptions[0].display_name }}" + id: "{{ az_all_subscriptions.subscriptions[0].subscription_id }}" + register: invalid_name + ignore_errors: yes + +- name: Assert task failed + assert: + that: + - "invalid_name['failed'] == True" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/aliases new file mode 100644 index 000000000..46c379ff5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group13 +destructive diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml new file mode 100644 index 000000000..865c56dc4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml @@ -0,0 +1,289 @@ +- name: Prepare random number + set_fact: + tmname: "tm{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + endpointname1: "ep1{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + endpointname2: "ep2{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + + +- name: Create a Traffic Manager profile(check mode) + azure_rm_trafficmanagerprofile: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + tags: + testing: testing + delete: on-exit + foo: bar + location: global + profile_status: enabled + routing_method: performance + dns_config: + relative_name: "{{ tmname }}" + ttl: 60 + monitor_config: + protocol: HTTPS + port: 80 + path: '/' + check_mode: yes + +- name: Check there is no Traffic Manager profile created + azure_rm_trafficmanagerprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + register: fact + +- name: Check there is no Traffic Manager profile created + assert: { that: "{{ fact.tms | length }} == 0" } + +- name: Create a Traffic Manager profile + 
azure_rm_trafficmanagerprofile: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + tags: + testing: testing + delete: on-exit + foo: bar + location: global + profile_status: enabled + routing_method: performance + dns_config: + relative_name: "{{ tmname }}" + ttl: 60 + monitor_config: + protocol: HTTPS + port: 80 + path: '/' + register: tm + +- name: Assert the Traffic Manager profile is well created + assert: + that: + - tm.changed + +- name: Gather Traffic Manager profile facts + azure_rm_trafficmanagerprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + register: fact + +- name: Assert fact returns the created one + assert: + that: + - "fact.tms | length == 1" + - fact.tms[0].id == tm.id + - fact.tms[0].endpoints | length == 0 + +- name: Create a Traffic Manager profile (idempotent) + azure_rm_trafficmanagerprofile: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + tags: + testing: testing + delete: on-exit + foo: bar + location: global + profile_status: enabled + routing_method: performance + dns_config: + relative_name: "{{ tmname }}" + ttl: 60 + monitor_config: + protocol: HTTPS + port: 80 + path: '/' + register: output + +- name: Assert idempotent + assert: + that: + - not output.changed + +- name: Update the Traffic Manager profile + azure_rm_trafficmanagerprofile: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + tags: + testing: testing + delete: on-exit + foo: bar + location: global + profile_status: disabled + routing_method: priority + dns_config: + relative_name: "{{ tmname }}" + ttl: 60 + monitor_config: + protocol: HTTPS + port: 80 + path: '/' + register: output + +- name: Assert the Traffic Manager profile is updated + assert: + that: + - output.changed + +- name: Create Traffic Manager endpoint(check mode) + azure_rm_trafficmanagerendpoint: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + name: "{{ endpointname1 }}" + type: external_endpoints + 
location: westus + priority: 2 + weight: 1 + target: 1.2.3.4 + check_mode: yes + register: output + +- name: Assert check mode changed + assert: + that: + - output.changed + +- name: Get endpoint + azure_rm_trafficmanagerendpoint_info: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + register: facts + +- name: Check no endpoint created in check mode + assert: + that: + - facts.endpoints | length == 0 + +- name: Create Traffic Manager endpoint + azure_rm_trafficmanagerendpoint: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + name: "{{ endpointname1 }}" + type: external_endpoints + location: westus + priority: 2 + weight: 1 + target: 1.2.3.4 + register: output + +- name: Assert endpoint create changed + assert: + that: + - output.changed + +- name: Get endpoint + azure_rm_trafficmanagerendpoint_info: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + register: facts + +- name: Check endpoint created + assert: + that: + - facts.endpoints | length == 1 + - facts.endpoints[0].name == "{{ endpointname1 }}" + +- name: Create second Traffic Manager endpoint + azure_rm_trafficmanagerendpoint: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + name: "{{ endpointname2 }}" + type: external_endpoints + location: westus + priority: 1 + weight: 3 + target: 4.3.2.1 + +- name: Get endpoint + azure_rm_trafficmanagerendpoint_info: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + register: facts + +- name: Check 2 endpoint in profile + assert: + that: + - facts.endpoints | length == 2 + +- name: Create endpoint (idempotent) + azure_rm_trafficmanagerendpoint: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + name: "{{ endpointname2 }}" + type: external_endpoints + location: westus + priority: 1 + weight: 3 + target: 4.3.2.1 + register: output + +- name: Assert endpoint creation idempotent + assert: + that: + - output.changed == False + +- 
name: Delete second endpoint + azure_rm_trafficmanagerendpoint: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + name: "{{ endpointname2 }}" + type: external_endpoints + state: absent + register: output + +- name: Assert endpoint deletion changed + assert: + that: + - output.changed + +- name: Get endpoint + azure_rm_trafficmanagerendpoint_info: + resource_group: "{{ resource_group }}" + profile_name: "{{ tmname }}" + register: facts + +- name: Check 1 endpoint left in profile + assert: + that: + - facts.endpoints | length == 1 + +- name: Delete the Traffic Manager profile(check mode) + azure_rm_trafficmanagerprofile: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + state: absent + check_mode: yes + +- name: Gather Traffic Manager profile facts + azure_rm_trafficmanagerprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + register: fact + +- name: Assert the traffic manager profile is still there + assert: + that: + - "fact.tms | length == 1" + - fact.tms[0].id == tm.id + - fact.tms[0].endpoints | length == 1 + +- name: Delete the Traffic Manager profile + azure_rm_trafficmanagerprofile: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + state: absent + register: output + +- name: Assert the Traffic Manager profile is well deleted + assert: + that: + - output.changed + +- name: Get Traffic Manager profile fact + azure_rm_trafficmanagerprofile_info: + resource_group: "{{ resource_group }}" + name: "{{ tmname }}" + register: fact + +- name: Assert fact returns empty + assert: + that: + - "fact.tms | length == 0" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/aliases @@ -0,0 +1,3 @@ 
+cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/tasks/main.yml new file mode 100644 index 000000000..36707bec5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhub/tasks/main.yml @@ -0,0 +1,94 @@ +- set_fact: + name: "{{ resource_group | hash('md5') | truncate(22, True, '') }}" + +- name: Create a VirtualHub (check mode) + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + sku: Standard + check_mode: yes + +- name: Create a VirtualHub + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + sku: Standard + register: output + +- name: Assert the virtual hub is well created + assert: + that: + - output.changed + - output.state.provisioning_state == 'Succeeded' + +- name: Create a VirtualHub (idempotent) + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + sku: Standard + register: output + +- name: Assert idempotent + assert: + that: + - not output.changed + +- name: Get Virtual Hub Info + azure_rm_virtualhub_info: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + register: output + +- name: Assert fact returns + assert: + that: + - output.virtual_hubs[0].provisioning_state == "Succeeded" + - output.virtual_hubs[0].sku == "Standard" + +- name: Create a VirtualWan + azure_rm_virtualwan: + 
resource_group: "{{ resource_group }}" + name: "wan-{{ name }}" + disable_vpn_encryption: true + allow_branch_to_branch_traffic: true + allow_vnet_to_vnet_traffic: true + virtual_wan_type: Standard + register: output + +- name: Create Second VirtualHub + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "{{ name }}-seondary" + sku: Standard + address_prefix: 12.0.0.0/16 + virtual_wan: + id: "{{ output.state.id }}" + virtual_router_asn: 65515 + virtual_router_ips: + - 12.0.32.4 + - 12.0.32.5 + register: output + +- name: Assert the virtual hub is well created + assert: + that: + - output.changed + - output.state.provisioning_state == 'Succeeded' + +- name: Delete Second VirtualHub + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "{{ name }}-seondary" + state: absent + +- name: Delete virtual hub + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + state: absent + register: output + +- name: Assert the AKS instance is upgraded + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/tasks/main.yml new file mode 100644 index 000000000..562ecbacc --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualhubconnection/tasks/main.yml @@ -0,0 +1,163 @@ + - set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}" + + - name: Create virtual network + azure_rm_virtualnetwork: + name: "vnet{{ rpfx }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + resource_group: "{{ resource_group }}" + register: vnet_output + + - name: Create a Virtual Wan + azure_rm_virtualwan: + resource_group: "{{ resource_group }}" + name: "virtualwan{{ rpfx }}" + office365_local_breakout_category: All + disable_vpn_encryption: true + allow_branch_to_branch_traffic: true + allow_vnet_to_vnet_traffic: true + virtual_wan_type: Basic + register: vwan_output + + - name: Create a VirtualHub + azure_rm_virtualhub: + resource_group: "{{ resource_group }}" + name: "vhub{{ rpfx }}" + sku: Standard + address_prefix: 12.0.0.0/16 + virtual_wan: + id: "{{ vwan_output.state.id }}" + virtual_router_asn: 65515 + virtual_router_ips: + - 12.0.32.4 + - 12.0.32.5 + register: output + + - name: Create virtual hub connection + azure_rm_virtualhubconnection: + resource_group: "{{ resource_group }}" + vhub_name: "vhub{{ rpfx }}" + name: "connection{{ rpfx }}" + enable_internet_security: true + allow_remote_vnet_to_use_hub_vnet_gateways: false + allow_hub_to_remote_vnet_transit: true + remote_virtual_network: + id: "{{ vnet_output.state.id }}" + routing_configuration: + propagated_route_tables: + labels: + - labels1 + - labels3 + vnet_routes: + static_routes: + - name: route1 + address_prefixes: + - 10.1.0.0/16 + - 10.2.0.0/16 + - 10.6.0.0/16 + next_hop_ip_address: 10.0.0.68 + - name: route2 + address_prefixes: + - 
10.4.0.0/16 + next_hop_ip_address: 10.0.0.65 + register: output + + - name: Assert the virtual hub connection is well created + assert: + that: + - output.changed + + - name: Create virtual hub connection (idempotent test) + azure_rm_virtualhubconnection: + resource_group: "{{ resource_group }}" + vhub_name: "vhub{{ rpfx }}" + name: "connection{{ rpfx }}" + enable_internet_security: true + allow_remote_vnet_to_use_hub_vnet_gateways: false + allow_hub_to_remote_vnet_transit: true + remote_virtual_network: + id: "{{ vnet_output.state.id }}" + routing_configuration: + propagated_route_tables: + labels: + - labels1 + - labels3 + vnet_routes: + static_routes: + - name: route1 + address_prefixes: + - 10.1.0.0/16 + - 10.2.0.0/16 + - 10.6.0.0/16 + next_hop_ip_address: 10.0.0.68 + - name: route2 + address_prefixes: + - 10.4.0.0/16 + next_hop_ip_address: 10.0.0.65 + register: output + + - name: Assert the virtual hub connection no changed + assert: + that: + - not output.changed + + - name: Update virtual hub connection + azure_rm_virtualhubconnection: + resource_group: "{{ resource_group }}" + vhub_name: "vhub{{ rpfx }}" + name: "connection{{ rpfx }}" + enable_internet_security: false + allow_remote_vnet_to_use_hub_vnet_gateways: false + allow_hub_to_remote_vnet_transit: true + remote_virtual_network: + id: "{{ vnet_output.state.id }}" + routing_configuration: + propagated_route_tables: + labels: + - labels1 + - labels2 + - labels3 + vnet_routes: + static_routes: + - name: route1 + address_prefixes: + - 10.1.0.0/16 + - 10.2.0.0/16 + - 10.6.0.0/16 + - 10.7.0.0/16 + next_hop_ip_address: 10.0.0.68 + - name: route2 + address_prefixes: + - 10.4.0.0/16 + next_hop_ip_address: 10.0.0.65 + register: output + + - name: Assert the virtual hub connection no changed + assert: + that: + - output.changed + + - name: Get virtual hub connection info + azure_rm_virtualhubconnection_info: + resource_group: "{{ resource_group }}" + virtual_hub_name: "vhub{{ rpfx }}" + name: "connection{{ rpfx 
}}" + register: output + + - name: Assert fact returns + assert: + that: + - output.virtual_hub_connection[0].allow_hub_to_remote_vnet_transit + - not output.virtual_hub_connection[0].allow_remote_vnet_to_use_hub_vnet_gateways + - not output.virtual_hub_connection[0].enable_internet_security + - output.virtual_hub_connection[0].routing_configuration.propagated_route_tables.labels | length == 3 + - output.virtual_hub_connection[0].routing_configuration.vnet_routes.static_routes | length == 2 + + - name: Delete the virtual hub connection + azure_rm_virtualhubconnection: + resource_group: "{{ resource_group }}" + vhub_name: "vhub{{ rpfx }}" + name: "connection{{ rpfx }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/aliases new file mode 100644 index 000000000..7611d8265 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group7 +destructive +azure_rm_virtualmachine_info diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml new file mode 100644 index 000000000..ceb88cd7e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml @@ -0,0 +1,74 @@ +all: + hosts: + azure_test_invalid: + azure_test_public_ip: + network: 10.42.0.0/24 + subnet: 10.42.0.0/28 + + azure_test_no_public_ip: + network: 10.42.1.0/24 + subnet: 10.42.1.0/28 + + azure_test_deallocate: + network: 10.42.2.0/24 + subnet: 10.42.2.0/28 + + azure_test_minimal: + network: 10.42.3.0/24 + subnet: 10.42.3.0/28 + + azure_test_minimal_manageddisk: + network: 10.42.3.0/24 + subnet: 10.42.3.0/28 + + 
azure_test_dual_nic: + network: 10.42.4.0/24 + subnet: 10.42.4.0/28 + secondary_network: 10.42.5.0/24 + secondary_subnet: 10.42.5.0/28 + nic_list: + - name: "{{ 'int' ~ uid_short ~ '-1' }}" + resource_group: "{{ resource_group_secondary }}" + - name: "{{ 'int' ~ uid_short ~ '-2' }}" + resource_group: "{{ resource_group_secondary }}" + + azure_test_no_nsg: + network: 10.42.6.0/24 + subnet: 10.42.6.0/28 + + vars: + ansible_connection: local + ansible_python_interpreter: "{{ ansible_playbook_python }}" + + uid: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(18, True, '') }}" + uid_short: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(10, True, '') }}" + + storage_account: "{{ 'stor' ~ uid }}" + availability_set: "{{ 'avbs' ~ uid_short }}" + vm_name: "{{ 'vm' ~ uid_short }}" + network_name: "{{ 'vnet' ~ uid_short }}" + subnet_name: "{{ 'snet' ~ uid_short }}" + security_group: "{{ 'sg' ~ uid_short }}" + public_ip_name: "{{ 'ip' ~ uid_short }}" + interface_name: "{{ 'int' ~ uid_short }}" + + ssh_keys: + - path: '/home/chouseknecht/.ssh/authorized_keys' + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1igsIlcmTa/yfsJnTtnrEX7PP/a01gwbXcig6JOKyrUmJB8E6c/wtZwP115VSyDRTO6TEL/sBFUpkSw01zM8ydNATErh8meBlAlbnDq5NLhDXnMizgG0VNn0iLc/WplFTqkefsHXa8NtIxAtyEVIj/fKbK3XfBOdEpE3+MJYNtGlWyaod28W+5qmQPZDQys+YnE4OjSwN7D3g85/7dtLFvDH+lEC4ooJOaxVFr9VSMXUIkaRF6oI+R1Zu803LFSCTb4BfFOYOHPuQ/rEMP0KuUzggvP+TEBY14PEA2FoHOn+oRsT0ZR2+loGRaxSVqCQKaEHbNbkm+6Rllx2NQRO0BJxCSKRU1iifInLPxmSc4gvsHCKMAWy/tGkmKHPWIfN8hvwyDMK5MNBp/SJ1pVx4xuFDQjVWNbll0yk2+72uJgtFHHwEPK9QsOz45gX85vS3yhYCKrscS/W9h2l36SWwQXuGy4fXotE7esPsvNGAzBndHX1O8RMPg47qJXz059RyoGforoa9TnzIs3hIv+ts7ESx3OEq3HNk0FJ+wDka7IM7WQpGrVToJ0vfDy9Q46nw54vv5Zc/u4OZF3F5twHmyf3rLYKXRDuCvZQKT2iWQKVX6j63bq6orA5hwl22zndxWZNtOwtq8Sd0Ns0K/Fo/ggYDDGBtr68DwhA+MrxrHw== chouseknecht@ansible.com" + + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + + image_paid: + publisher: cognosys + offer: 
ubuntu-14-04-lts + sku: hardened-ubuntu-14-04 + version: latest + + plan_paid: + name: hardened-ubuntu-14-04 + product: ubuntu-14-04-lts + publisher: cognosys diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/main.yml new file mode 100644 index 000000000..c9c9158c4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/main.yml @@ -0,0 +1,7 @@ +- name: Run Azure VM tests in parallel + hosts: all + gather_facts: no + strategy: free + tasks: + - name: Include tasks based on inventory hostname + include_tasks: tasks/{{ inventory_hostname }}.yml diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/runme.sh b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/runme.sh new file mode 100644 index 000000000..c7895c9d2 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/runme.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +set -eux + +ansible-playbook -i inventory.yml main.yml "$@" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml new file mode 100644 index 000000000..3c86c7419 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml @@ -0,0 +1,103 @@ +- include_tasks: setup.yml + +- name: Create minimal VM with defaults + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: 
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + virtual_network: "{{ network_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output + +- name: Get VM facts + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + register: vm_state + +- name: Pause for 10 mimutes to VM updating + shell: sleep 600 + +- name: Restart the virtual machine + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + restarted: yes + vm_size: Standard_B1ms + register: restart_result + +- name: Ensue VM was restarted + assert: + that: + - "azure_vm.powerstate in ['starting', 'running']" + - restart_result is changed + +- name: Deallocate the virtual machine + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + allocated: no + vm_size: Standard_B1ms + register: deallocate_result + +- name: Ensure VM was deallocated + assert: + that: + - azure_vm.powerstate == 'deallocated' + - deallocate_result is changed + +- name: Start the virtual machine + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_B1ms + started: True + register: start_result + +- name: Ensure VM was started + assert: + that: + - "azure_vm.powerstate in ['starting', 'running']" + - start_result is changed + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + remove_on_absent: all_autocreated + +- name: Destroy subnet + azure_rm_subnet: + 
resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + +- name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + +- name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: yes + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml new file mode 100644 index 000000000..6202e3534 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml @@ -0,0 +1,146 @@ +- include_tasks: setup.yml + +- name: Create virtual network in secondary resource group + azure_rm_virtualnetwork: + resource_group: "{{ resource_group_secondary }}" + name: "{{ network_name ~ '-2' }}" + address_prefixes: "{{ secondary_network }}" + register: create_virt_net_result + +- name: Create subnet in secondary resource group + azure_rm_subnet: + resource_group: "{{ resource_group_secondary }}" + name: "{{ subnet_name ~ '-2' }}" + address_prefix: "{{ secondary_subnet }}" + virtual_network: "{{ network_name ~ '-2' }}" + +- name: Create NICs for dual NIC VM in secondary resource group + azure_rm_networkinterface: + resource_group: "{{ item.resource_group }}" + name: "{{ item.name }}" + virtual_network: "{{ network_name ~ '-2' }}" + subnet: "{{ subnet_name ~ '-2' }}" + loop: "{{ nic_list }}" + +- name: Create virtual machine with two NICs + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" # Should 
this be resource_group_secondary? + name: "{{ vm_name }}" + vm_size: Standard_B1ms + storage_account: "{{ storage_account }}" + storage_container: "{{ vm_name }}" + storage_blob: "{{ vm_name }}.vhd" + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + short_hostname: testvm + os_type: Linux + os_disk_size_gb: 64 + os_disk_name: testosdiskxx + network_interfaces: "{{ nic_list }}" + availability_set: "{{ availability_set }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + tags: + abc: def + +- name: Ensure VM was created properly + assert: + that: + - azure_vm.properties.availabilitySet.id + - azure_vm.properties.storageProfile.osDisk.name == 'testosdiskxx' + +- name: Retrieve VM facts (filtering by name) + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" # Should this be resource_group_secondary? 
+ name: "{{ vm_name }}" + register: vm_facts_results + +- name: Ensure facts module returned the second VM + assert: + that: + - vm_facts_results.vms | length == 1 + - vm_facts_results.vms[0].name == "{{ vm_name }}" + - vm_facts_results.vms[0].location + - vm_facts_results.vms[0].admin_username == 'adminuser' + - vm_facts_results.vms[0].resource_group == "{{ resource_group }}" + - vm_facts_results.vms[0].power_state != None + +- name: Retrieve facts by tags + azure_rm_virtualmachine_info: + tags: + - abc:def + register: facts_by_tags_results + +- name: Assert that facts module returned the second VM + assert: + that: + - facts_by_tags_results.vms | length >= 1 + +- name: Should be idempotent with a dual NICs + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" # Should this be resource_group_secondary? + name: "{{ vm_name }}" + vm_size: Standard_B1ms + storage_account: "{{ storage_account }}" + storage_container: "{{ vm_name }}" + storage_blob: "{{ vm_name }}.vhd" + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + short_hostname: testvm + os_type: Linux + os_disk_size_gb: 64 + network_interfaces: "{{ nic_list }}" + availability_set: "{{ availability_set }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: dual_nics_result + +- name: Ensure nothing changed + assert: + that: dual_nics_result is not changed + +- name: Get VM facts + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name 
}}" + register: vm_state + +- name: Pause for 10 mimutes to VM updating + shell: sleep 600 + +- name: Generalize VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" # Should this be resource_group_secondary? + name: "{{ vm_name }}" + generalized: yes + +- name: Gather facts and check if machine is generalized + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" # Should this be resource_group_secondary? + name: "{{ vm_name }}" + register: generalized_output + +- name: Ensure power state is generalized + assert: + that: generalized_output.vms[0].power_state == 'generalized' + +- name: Delete dual NIC VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" # Should this be resource_group_secondary? + name: "{{ vm_name }}" + state: absent + vm_size: Standard_B1ms + async: 5000 + poll: 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_ephemeral_os.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_ephemeral_os.yml new file mode 100644 index 000000000..4169333b9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_ephemeral_os.yml @@ -0,0 +1,130 @@ +- include_tasks: setup.yml + +- name: create proximity placement group + azure_rm_proximityplacementgroup: + resource_group: "{{ resource_group }}" + name: testproximityplacement + register: output + +- name: Create minimal VM with proximentplace group + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + proximity_placement_group: + id: "{{ output.state.id }}" + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_DS2_v2 + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output + +- name: Get VM facts + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + register: vm_state + +- name: Pause for 10 minutes while the VM is updating + shell: sleep 600 + +- name: Generalize VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + generalized: yes + +- name: Create an image from VM + azure_rm_image: + resource_group: "{{ resource_group }}" + source: "{{ vm_name }}" + name: testimage + os_type: Linux + register: output + +- assert: + that: + - output.changed + +- name: Create virtual machine with ephemeral OS disk + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}-02" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_DS2_v2 + os_disk_caching: ReadOnly + ephemeral_os_disk: True + image: testimage + register: output + +- assert: + that: + - 
output.ansible_facts.azure_vm.properties.storageProfile.osDisk.diffDiskSettings.option == 'Local' + +- name: Check virtual machine ephemeral OS disk idempotent + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}-02" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_DS2_v2 + os_disk_caching: ReadOnly + ephemeral_os_disk: True + image: testimage + register: output + +- assert: + that: + - not output.changed + +- name: Check virtual machine ephemeral OS disk can't update + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}-02" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_DS2_v2 + os_disk_caching: ReadOnly + ephemeral_os_disk: False + image: testimage + ignore_errors: yes + register: output + +- assert: + that: + - not output.changed + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + +- name: Delete VM + 
azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}-02" + state: absent + +- name: Create an image from VM + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimage + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_latest.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_latest.yml new file mode 100644 index 000000000..812ef48d3 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_latest.yml @@ -0,0 +1,69 @@ +- include_tasks: setup.yml + +- name: List available versions for UbuntuServer image + azure_rm_virtualmachineimage_info: + location: eastus + publisher: Canonical + offer: UbuntuServer + sku: 16.04-LTS + register: image_list + +- name: Get latest UbuntuServer image name + set_fact: + latest_image_name: "{{ (image_list['vmimages'] | map(attribute='name') | sort(reverse=True))[0] }}" + +- name: Create minimal VM with defaults + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + virtual_network: "{{ network_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output + +- name: Ensure VM was created using the latest 
UbuntuServer image version / name + assert: + that: + - vm_output.azure_vm.properties.storageProfile.imageReference.version == latest_image_name + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + remove_on_absent: all_autocreated + state: absent + +- name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + +- name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + +- name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: yes + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_specific.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_specific.yml new file mode 100644 index 000000000..12fc28154 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_specific.yml @@ -0,0 +1,61 @@ +- include_tasks: setup.yml + +- name: Set specific UbuntuServer image version + set_fact: + specific_image_name: "16.04.202104140" + +- name: Create minimal VM with defaults + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + virtual_network: "{{ network_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: "{{ specific_image_name }}" + register: vm_output + +- name: Ensure VM was created using the specific UbuntuServer image version / name + assert: + that: + - vm_output.azure_vm.properties.storageProfile.imageReference.version == specific_image_name + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + remove_on_absent: all_autocreated + state: absent + +- name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + +- name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + +- name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: yes + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml new file mode 100644 index 000000000..86ec72de1 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml @@ -0,0 +1,35 @@ +# TODO: Until we have a module to create/delete images this is the best tests I can do +- name: Assert error thrown with invalid image dict + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + image: + offer: UbuntuServer + register: fail_invalid_image_dict + failed_when: 'fail_invalid_image_dict.msg != "parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]"' + +- name: Assert error thrown with invalid image type + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + image: + - testing + register: fail_invalid_image_type + failed_when: 'fail_invalid_image_type.msg != "parameter error: expecting image to be a string or dict not list"' + +- name: Assert error finding missing custom image + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + image: invalid-image + register: fail_missing_custom_image + failed_when: fail_missing_custom_image.msg != "Error could not find image with name invalid-image" + +- name: Assert error finding missing custom image (dict style) + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + image: + name: invalid-image + register: fail_missing_custom_image_dict + failed_when: fail_missing_custom_image_dict.msg != "Error could not find image with name invalid-image" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml new file mode 100644 index 000000000..750cbe0b4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml @@ 
-0,0 +1,133 @@ +- include_tasks: setup.yml + +# # Tests possible when CI user acccount setup with required authority +# - name: Create virtual machine with image and plan which requires acceptance of terms +# azure_rm_virtualmachine: +# resource_group: "{{ resource_group }}" +# name: testvm009 +# vm_size: Standard_A0 +# storage_account: "{{ storage_account }}" +# storage_container: testvm001 +# storage_blob: testvm003.vhd +# admin_username: adminuser +# admin_password: Password123! +# short_hostname: testvm +# os_type: Linux +# availability_set: "{{ availability_set }}" +# image: "{{ image_paid }}" +# plan_paid: "{{ plan_paid }}" +# register: create_image_plan_result + +# - assert: +# that: +# - create_image_plan_result is changed +# - create_image_plan_result.ansible_facts.azure_vm.properties.storageProfile.imageReference.publisher == image_paid.publisher + +# - name: Should be idempotent with image and plan which requires acceptance of terms +# azure_rm_virtualmachine: +# resource_group: "{{ resource_group }}" +# name: testvm009 +# vm_size: Standard_A0 +# storage_account: "{{ storage_account }}" +# storage_container: testvm001 +# storage_blob: testvm003.vhd +# admin_username: adminuser +# admin_password: Password123! 
+# short_hostname: testvm +# os_type: Linux +# availability_set: "{{ availability_set }}" +# image: "{{ image_paid }}" +# plan_paid: "{{ plan_paid }}" +# register: create_image_plan_again_result + +# - assert: +# that: create_image_plan_again is not changed + +- name: Create minimal VM with defaults + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + virtual_network: "{{ network_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output + +- name: Query auto created security group before deleting + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: nsg_result + +- name: Assert that security group were exist before deleting + assert: + that: + - nsg_result.securitygroups | length == 1 + - nsg_result.securitygroups[0].network_interfaces | length == 1 + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + remove_on_absent: all_autocreated + state: absent + +- name: Query auto created NIC + azure_rm_networkinterface_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: nic_result + +- name: Query auto created security group + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: nsg_result + +- name: Query auto created 
public IP + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: pip_result + +- name: Assert that autocreated resources were deleted + assert: + that: + # what about the default storage group? + - nic_result.networkinterfaces | length == 0 + - nsg_result.securitygroups | length == 0 + - pip_result.publicipaddresses | length == 0 + +- name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + +- name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + +- name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: yes + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal_manageddisk.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal_manageddisk.yml new file mode 100644 index 000000000..27274b0de --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal_manageddisk.yml @@ -0,0 +1,89 @@ +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}-disk" + address_prefixes: "{{ network }}" + location: westeurope + +- name: Create subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "{{ subnet_name }}" + address_prefix: "{{ subnet }}" + virtual_network: "{{ network_name }}-disk" + +- name: Create network interface + azure_rm_networkinterface: + resource_group: 
"{{ resource_group }}" + name: "{{ interface_name }}-disk" + virtual_network: "{{ network_name }}-disk" + subnet: "{{ subnet_name }}" + location: westeurope + +- name: Create minimal VM with defaults and a custom managed disk type + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + managed_disk_type: StandardSSD_ZRS + public_ip_allocation_method: Disabled + location: westeurope + network_interface_names: + - name: "{{ interface_name }}-disk" + resource_group: "{{ resource_group }}" + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_D4s_v3 + virtual_network: "{{ network_name }}-disk" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output + +- name: Assert status succeeded + assert: + that: + - vm_output.changed + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + remove_on_absent: all_autocreated + state: absent + +- name: Destroy NIC + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ interface_name }}-disk" + state: absent + ignore_errors: true + +- name: Delete network security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ interface_name }}-disk" + state: absent + +- name: Destroy disk + azure_rm_manageddisk: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + +- name: Destroy subnet + azure_rm_subnet: + 
resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}-disk" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}-disk" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_nsg.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_nsg.yml new file mode 100644 index 000000000..7b88dd8d8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_nsg.yml @@ -0,0 +1,83 @@ +- include_tasks: setup.yml + +- name: Create minimal VM with defaults + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + virtual_network: "{{ network_name }}" + created_nsg: false + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output + +- name: Query auto created security group before deleting + azure_rm_securitygroup_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: nsg_result + +- name: Assert that security group were not exist before deleting + assert: + that: + - nsg_result.securitygroups | length == 0 + +- name: Delete VM + azure_rm_virtualmachine: + 
resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + remove_on_absent: all_autocreated + state: absent + +- name: Query auto created NIC + azure_rm_networkinterface_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: nic_result + +- name: Query auto created public IP + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}01" + register: pip_result + +- name: Assert that autocreated resources were deleted + assert: + that: + # what about the default storage group? + - nic_result.networkinterfaces | length == 0 + - pip_result.publicipaddresses | length == 0 + +- name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + +- name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + +- name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: yes + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml new file mode 100644 index 000000000..5b41a6eb9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml @@ -0,0 +1,44 @@ +- include_tasks: setup.yml + +- name: Create virtual machine without public ip address and with boot diagnostics enabled + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: 
Standard_B1ms + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + short_hostname: testvm + os_type: Linux + public_ip_allocation_method: Disabled + storage_account_name: "{{ storage_account }}" + availability_set: "{{ availability_set }}" + virtual_network: "{{ network_name }}" + boot_diagnostics: + enabled: yes + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: create_vm_public_result + +- name: Ensure VM was created properly + assert: + that: + - azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled + - azure_vm.properties.diagnosticsProfile.bootDiagnostics.storageUri is defined + - azure_vm.properties.instanceView.bootDiagnostics.consoleScreenshotBlobUri is defined + - azure_vm.properties.instanceView.bootDiagnostics.serialConsoleLogBlobUri is defined + - not 'publicIPAddress' in create_vm_public_result.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties + +- name: Delete VM with no public ip + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + remove_on_absent: all_autocreated + async: 5000 + poll: 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml new file mode 100644 index 000000000..6e3e47ea1 --- /dev/null 
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml @@ -0,0 +1,331 @@ +- include_tasks: setup.yml + +- name: Create public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: "{{ public_ip_name }}" + +- name: Create security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ security_group }}" + purge_rules: yes + rules: + - name: ALLOW_SSH + protocol: Tcp + destination_port_range: 22 + access: Allow + priority: 100 + direction: Inbound + + - name: ALLOW_HTTP + protocol: Tcp + destination_port_range: 80 + access: Allow + priority: 110 + direction: Inbound + +- name: Create network interface + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ interface_name }}" + virtual_network: "{{ network_name }}" + subnet: "{{ subnet_name }}" + public_ip_name: "{{ public_ip_name }}" + security_group: "{{ security_group }}" + +- name: Create virtual machine with a single NIC and no boot diagnostics + register: output + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_B1ms + storage_account: "{{ storage_account }}" + storage_container: "{{ vm_name }}" + storage_blob: "{{ vm_name }}.vhd" + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + short_hostname: testvm + os_type: Linux + network_interfaces: "{{ interface_name }}" + availability_set: "{{ 
availability_set }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + custom_data: | + #!/bin/sh + echo "custom_data was executed" > /tmp/custom_data.txt + +- name: Ensure VM was created properly + assert: + that: + - azure_vm.properties.provisioningState == 'Succeeded' + - azure_vm.properties.availabilitySet.id + # initial response from creation has no diagnosticsProfile + # if you run it again however, there is one in the response + # so we handle both cases + - "'diagnosticsProfile' not in azure_vm.properties or not azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled" + +- name: Get facts for virtual machine without boot diagnostics disabled + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + register: vm_facts_no_boot_diag_result + +- name: Ensure VM facts are correct + assert: + that: + - vm_facts_no_boot_diag_result.vms != [] + - not vm_facts_no_boot_diag_result.vms[0].boot_diagnostics.enabled + - not vm_facts_no_boot_diag_result.vms[0].boot_diagnostics.storage_uri + +- name: Get VM facts + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + register: vm_state + +- name: Pause for 10 mimutes to VM updating + shell: sleep 600 + +- name: Enable boot diagnostics on an existing VM for the first time without specifying a storage account + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + boot_diagnostics: + enabled: yes + # without specifying storage_account you get a new default storage account for the VM + +- name: Ensure VM properties are correct + assert: + that: + - azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled + - azure_vm.properties.diagnosticsProfile.bootDiagnostics.storageUri is defined + - azure_vm.properties.instanceView.bootDiagnostics.consoleScreenshotBlobUri is defined + - azure_vm.properties.instanceView.bootDiagnostics.serialConsoleLogBlobUri is 
defined + +- name: Get facts for virtual machine with boot diagnostics enabled + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + register: vm_facts_boot_diag_result + +- name: Ensure VM facts were returned + assert: + that: + - vm_facts_boot_diag_result.vms != [] + - vm_facts_boot_diag_result.vms[0].boot_diagnostics.enabled + - vm_facts_boot_diag_result.vms[0].boot_diagnostics.storage_uri is defined + - vm_facts_boot_diag_result.vms[0].boot_diagnostics.console_screenshot_uri is defined + - vm_facts_boot_diag_result.vms[0].boot_diagnostics.serial_console_log_uri is defined + +- name: Change the boot diagnostics storage account while enabled + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + boot_diagnostics: + enabled: yes + storage_account: "{{ storage_account }}" + ignore_errors: yes + +- name: Disable boot diagnostics and change the storage account at the same time + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + boot_diagnostics: + enabled: no + storage_account: "{{ storage_account }}" + +- name: Ensure boot diagnostics was disabled + assert: + that: + - not azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled + +- name: Re-enable boot diagnostics on an existing VM where it was previously configured + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + boot_diagnostics: + enabled: yes + register: reenable_boot_diag_result + +- name: Ensure boot diagnostics was reenabled + assert: + that: + - azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled + - azure_vm.properties.diagnosticsProfile.bootDiagnostics.storageUri is defined + - azure_vm.properties.instanceView.bootDiagnostics.consoleScreenshotBlobUri is defined + - azure_vm.properties.instanceView.bootDiagnostics.serialConsoleLogBlobUri is defined + +# - add_host: +# name: new_azure_vm +# ansible_host: '{{ 
reenable_boot_diag_result.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.publicIPAddress.properties.ipAddress }}' +# ansible_connection: paramiko # not guaranteed to have sshpass... +# ansible_user: adminuser +# ansible_password: Password123! +# ansible_host_key_checking: no + +# - name: wait for SSH port to be open +# wait_for: +# host: '{{ hostvars["new_azure_vm"].ansible_host }}' +# port: 22 +# timeout: 60 +# state: started + +# # TODO: figure out how to make this work under ansible-test with the coverage injector +# - block: +# - name: wait for host to answer on SSH +# delegate_to: new_azure_vm +# wait_for_connection: + +# - name: get content from custom_data script +# raw: cat /tmp/custom_data.txt +# register: custom_data_content + +# - name: assert contents +# assert: +# that: custom_data_content.stdout | regex_search('custom_data was executed') +# delegate_to: new_azure_vm + +# # TODO: figure out how to make this work under ansible-test with the coverage injector +# - name: wait for file/content created by custom_data script +# delegate_to: new_azure_vm +# vars: +# ansible_python_interpreter: python +# wait_for: +# path: /tmp/custom_data.txt +# search_regex: ^custom_data was executed$ +# timeout: 20 + +- name: Should be idempotent with a single NIC + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_B1ms + storage_account: "{{ storage_account }}" + storage_container: "{{ vm_name }}" + storage_blob: "{{ vm_name }}.vhd" + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + short_hostname: testvm + os_type: Linux + network_interfaces: "{{ interface_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: single_nic_result + +- name: Ensure nothing changed + assert: + that: single_nic_result is not changed + +- name: Resize VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + vm_size: Standard_A1 + storage_account: "{{ storage_account }}" + storage_container: "{{ vm_name }}" + storage_blob: "{{ vm_name }}.vhd" + admin_username: adminuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/adminuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + short_hostname: testvm + os_type: Linux + network_interfaces: "{{ interface_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: resize_result + +- name: Esure VM was resized + assert: + that: + - resize_result is changed + - resize_result.ansible_facts.azure_vm.properties.hardwareProfile.vmSize == "Standard_A1" + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + vm_size: Standard_B1ms + 
+- name: NIC should be gone + azure_rm_networkinterface_info: + resource_group: "{{ resource_group }}" + name: "{{ interface_name }}" + register: output + +- name: Ensure NIC was removed + assert: + that: output.networkinterfaces | length == 0 + +- name: Public IP should be gone + azure_rm_publicipaddress_info: + resource_group: "{{ resource_group }}" + name: "{{ public_ip_name }}" + register: output + +- name: Ensure public IP was removed + assert: + that: output.publicipaddresses | length == 0 + +- name: Destroy NIC + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "{{ interface_name }}" + state: absent + +- name: Destroy security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: "{{ security_group }}" + state: absent + +- name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + +- name: Destroy public ip + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + name: "{{ public_ip_name }}" + state: absent + +- name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + +- name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: true + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_spot.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_spot.yml new file mode 100644 index 000000000..a967e050d --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_spot.yml @@ -0,0 +1,83 @@ +- include_tasks: setup.yml + +- name: Create minimal VM with Spot Instance default values + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + priority: Spot + eviction_policy: Deallocate + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_A1_v2 + virtual_network: "{{ network_name }}" + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + register: vm_output + +- name: Ensure VM was created using Spot Instance default values + assert: + that: + - azure_vm.properties.priority == 'Spot' + - azure_vm.properties.evictionPolicy == 'Deallocate' + - azure_vm.properties.billingProfile.maxPrice == -1.0 + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + vm_size: Standard_A1_v2 + +- name: Create minimal VM with custom Spot Instance values + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + priority: Spot + eviction_policy: Delete + max_price: 1.0 + admin_username: "testuser" + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_A1_v2 + virtual_network: "{{ network_name }}" + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + register: vm_output + +- name: Ensure VM was created using custom spot instance values + assert: + that: + - azure_vm.properties.priority == 'Spot' + - azure_vm.properties.evictionPolicy == 'Delete' + - azure_vm.properties.billingProfile.maxPrice == 1.0 + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + state: absent + vm_size: Standard_A1_v2 + +- name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + +- name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/setup.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/setup.yml new file mode 100644 index 000000000..f053cac03 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/setup.yml @@ -0,0 +1,26 @@ +- debug: + msg: "UID is {{ uid_short }}" + +- name: SETUP | Create storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + account_type: Standard_LRS + +- name: SETUP | Create availability set + azure_rm_availabilityset: + name: 
"{{ availability_set }}" + resource_group: "{{ resource_group }}" + +- name: SETUP | Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + address_prefixes: "{{ network }}" + +- name: SETUP | Create subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "{{ subnet_name }}" + address_prefix: "{{ subnet }}" + virtual_network: "{{ network_name }}" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/aliases new file mode 100644 index 000000000..69418ea2c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group5 +destructive +unsupported +azure_rm_virtualmachineextension_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-protected-settings.json b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-protected-settings.json new file mode 100644 index 000000000..0957f325c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-protected-settings.json @@ -0,0 +1,13 @@ +{ + "storageAccountName": "dummystorageaccount", + "storageAccountSasToken": "dummy-storage-sas-token", + "sinksConfig": { + "sink": [ + { + "name": "TestEventHub", + "type": "EventHub", + "sasURL": "dummy-sas-url" + } + ] + } +} diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-public-settings.json b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-public-settings.json new file mode 100644 index 
000000000..0e81bb6e2 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/files/test-public-settings.json @@ -0,0 +1,750 @@ +{ + "StorageAccount": "dummystorageaccount", + "ladCfg": { + "diagnosticMonitorConfiguration": { + "eventVolume": "Medium", + "metrics": { + "metricAggregation": [ + { + "scheduledTransferPeriod": "PT1M" + }, + { + "scheduledTransferPeriod": "PT1H" + } + ], + "resourceId": "dummyresourceid" + }, + "performanceCounters": { + "performanceCounterConfiguration": [ + { + "annotation": [ + { + "displayName": "CPU IO wait time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentiowaittime", + "counterSpecifier": "/builtin/processor/percentiowaittime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU user time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentusertime", + "counterSpecifier": "/builtin/processor/percentusertime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU nice time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentnicetime", + "counterSpecifier": "/builtin/processor/percentnicetime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU percentage guest OS", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentprocessortime", + "counterSpecifier": "/builtin/processor/percentprocessortime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU interrupt time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": 
"percentinterrupttime", + "counterSpecifier": "/builtin/processor/percentinterrupttime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU idle time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentidletime", + "counterSpecifier": "/builtin/processor/percentidletime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU privileged time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentprivilegedtime", + "counterSpecifier": "/builtin/processor/percentprivilegedtime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Memory available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "availablememory", + "counterSpecifier": "/builtin/memory/availablememory", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Swap percent used", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentusedswap", + "counterSpecifier": "/builtin/memory/percentusedswap", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Memory used", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "usedmemory", + "counterSpecifier": "/builtin/memory/usedmemory", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Page reads", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "pagesreadpersec", + "counterSpecifier": "/builtin/memory/pagesreadpersec", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Swap available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": 
"availableswap", + "counterSpecifier": "/builtin/memory/availableswap", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Swap percent available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentavailableswap", + "counterSpecifier": "/builtin/memory/percentavailableswap", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Mem. percent available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentavailablememory", + "counterSpecifier": "/builtin/memory/percentavailablememory", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Pages", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "pagespersec", + "counterSpecifier": "/builtin/memory/pagespersec", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Swap used", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "usedswap", + "counterSpecifier": "/builtin/memory/usedswap", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Memory percentage", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentusedmemory", + "counterSpecifier": "/builtin/memory/percentusedmemory", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Page writes", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "pageswrittenpersec", + "counterSpecifier": "/builtin/memory/pageswrittenpersec", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Network in guest OS", + "locale": "en-us" + } + ], + "class": "network", + "counter": "bytesreceived", + "counterSpecifier": "/builtin/network/bytesreceived", + 
"sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Network total bytes", + "locale": "en-us" + } + ], + "class": "network", + "counter": "bytestotal", + "counterSpecifier": "/builtin/network/bytestotal", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Network out guest OS", + "locale": "en-us" + } + ], + "class": "network", + "counter": "bytestransmitted", + "counterSpecifier": "/builtin/network/bytestransmitted", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Network collisions", + "locale": "en-us" + } + ], + "class": "network", + "counter": "totalcollisions", + "counterSpecifier": "/builtin/network/totalcollisions", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets received errors", + "locale": "en-us" + } + ], + "class": "network", + "counter": "totalrxerrors", + "counterSpecifier": "/builtin/network/totalrxerrors", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets sent", + "locale": "en-us" + } + ], + "class": "network", + "counter": "packetstransmitted", + "counterSpecifier": "/builtin/network/packetstransmitted", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets received", + "locale": "en-us" + } + ], + "class": "network", + "counter": "packetsreceived", + "counterSpecifier": "/builtin/network/packetsreceived", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets sent errors", + "locale": "en-us" + } + ], + "class": "network", + "counter": "totaltxerrors", + "counterSpecifier": "/builtin/network/totaltxerrors", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Count" + }, + { + 
"annotation": [ + { + "displayName": "Filesystem transfers/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "transferspersecond", + "counterSpecifier": "/builtin/filesystem/transferspersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem % free space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentfreespace", + "counterSpecifier": "/builtin/filesystem/percentfreespace", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Filesystem % used space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentusedspace", + "counterSpecifier": "/builtin/filesystem/percentusedspace", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Filesystem used space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "usedspace", + "counterSpecifier": "/builtin/filesystem/usedspace", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Filesystem read bytes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "bytesreadpersecond", + "counterSpecifier": "/builtin/filesystem/bytesreadpersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem free space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "freespace", + "counterSpecifier": "/builtin/filesystem/freespace", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Filesystem % free 
inodes", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentfreeinodes", + "counterSpecifier": "/builtin/filesystem/percentfreeinodes", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Filesystem bytes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "bytespersecond", + "counterSpecifier": "/builtin/filesystem/bytespersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem reads/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "readspersecond", + "counterSpecifier": "/builtin/filesystem/readspersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem write bytes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "byteswrittenpersecond", + "counterSpecifier": "/builtin/filesystem/byteswrittenpersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem writes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "writespersecond", + "counterSpecifier": "/builtin/filesystem/writespersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem % used inodes", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentusedinodes", + "counterSpecifier": "/builtin/filesystem/percentusedinodes", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Disk read guest OS", + "locale": 
"en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "readbytespersecond", + "counterSpecifier": "/builtin/disk/readbytespersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk writes", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "writespersecond", + "counterSpecifier": "/builtin/disk/writespersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk transfer time", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagetransfertime", + "counterSpecifier": "/builtin/disk/averagetransfertime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Seconds" + }, + { + "annotation": [ + { + "displayName": "Disk transfers", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "transferspersecond", + "counterSpecifier": "/builtin/disk/transferspersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk write guest OS", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "writebytespersecond", + "counterSpecifier": "/builtin/disk/writebytespersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk read time", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagereadtime", + "counterSpecifier": "/builtin/disk/averagereadtime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Seconds" + }, + { + "annotation": [ + { + "displayName": "Disk write time", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagewritetime", + 
"counterSpecifier": "/builtin/disk/averagewritetime", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Seconds" + }, + { + "annotation": [ + { + "displayName": "Disk total bytes", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "bytespersecond", + "counterSpecifier": "/builtin/disk/bytespersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk reads", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "readspersecond", + "counterSpecifier": "/builtin/disk/readspersecond", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk queue length", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagediskqueuelength", + "counterSpecifier": "/builtin/disk/averagediskqueuelength", + "sampleRate": "PT15S", + "type": "builtin", + "unit": "Count" + } + ] + }, + "syslogEvents": { + "sinks": "TestEventHub", + "syslogEventConfiguration": { + "LOG_AUTH": "LOG_DEBUG", + "LOG_AUTHPRIV": "LOG_DEBUG", + "LOG_CRON": "LOG_DEBUG", + "LOG_DAEMON": "LOG_DEBUG", + "LOG_FTP": "LOG_DEBUG", + "LOG_KERN": "LOG_DEBUG", + "LOG_LOCAL0": "LOG_DEBUG", + "LOG_LOCAL1": "LOG_DEBUG", + "LOG_LOCAL2": "LOG_DEBUG", + "LOG_LOCAL3": "LOG_DEBUG", + "LOG_LOCAL4": "LOG_DEBUG", + "LOG_LOCAL5": "LOG_DEBUG", + "LOG_LOCAL6": "LOG_DEBUG", + "LOG_LOCAL7": "LOG_DEBUG", + "LOG_LPR": "LOG_DEBUG", + "LOG_MAIL": "LOG_DEBUG", + "LOG_NEWS": "LOG_DEBUG", + "LOG_SYSLOG": "LOG_DEBUG", + "LOG_USER": "LOG_DEBUG", + "LOG_UUCP": "LOG_DEBUG" + } + } + }, + "sampleRateInSeconds": 15 + } +} diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/meta/main.yml new file 
mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml new file mode 100644 index 000000000..a4a1e62ee --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml @@ -0,0 +1,270 @@ +- name: Create Random Storage Account Name + set_fact: + storage_account: "vme{{ resource_group | hash('md5') | truncate(21, True, '') }}" + public_settings_file: "files/test-public-settings.json" + protected_settings_file: "files/test-protected-settings.json" + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: testVnet1 + address_prefixes: "10.0.0.0/16" + +- name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: testSubnet + address_prefix: "10.0.1.0/24" + virtual_network: testVnet1 + +- name: Create public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Dynamic + name: testPublicIP + +- name: Create Network Security Group that allows SSH + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: testNetworkSecurityGroup + rules: + - name: SSH + protocol: Tcp + destination_port_range: 22 + access: Allow + priority: 1001 + direction: Inbound + +- name: Create virtual network interface card + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: testNIC + virtual_network: testVnet1 + subnet: testSubnet + public_ip_name: testPublicIP + security_group_name: testNetworkSecurityGroup + +- name: create a storage account + azure_rm_storageaccount: + 
resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + type: Standard_LRS + +- name: Create VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: testVM + vm_size: Standard_DS1_v2 + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + network_interfaces: testNIC + managed_disk_type: StandardSSD_LRS + storage_account_name: "{{ storage_account }}" + storage_container: osdisk + storage_blob: osdisk.vhd + os_disk_caching: ReadWrite + image: + offer: 0001-com-ubuntu-server-focal + publisher: canonical + sku: 20_04-lts-gen2 + version: latest + +- name: Create VM Extension + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: testVMExtension + virtual_machine_name: testVM + publisher: Microsoft.Azure.Extensions + virtual_machine_extension_type: CustomScript + type_handler_version: 2.0 + auto_upgrade_minor_version: true + settings: {"commandToExecute": "date"} + register: results +- name: Assert that VM Extension ran + assert: + that: results.changed + +- name: Query extension + azure_rm_virtualmachineextension_info: + resource_group: "{{ resource_group }}" + name: testVMExtension + virtual_machine_name: testVM + register: results +- name: Assert that facts are returned + assert: + that: + - results.changed == False + - results.extensions[0]['id'] != None + - results.extensions[0]['resource_group'] != None + - results.extensions[0]['virtual_machine_name'] != None + - results.extensions[0]['name'] != None + - 
results.extensions[0]['location'] != None + - results.extensions[0]['publisher'] != None + - results.extensions[0]['type'] != None + - results.extensions[0]['settings'] != None + - results.extensions[0]['auto_upgrade_minor_version'] != None + - results.extensions[0]['provisioning_state'] != None + +- name: Create VM Extension (idempotent) + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: testVMExtension + virtual_machine_name: testVM + publisher: Microsoft.Azure.Extensions + virtual_machine_extension_type: CustomScript + type_handler_version: 2.0 + auto_upgrade_minor_version: true + settings: {"commandToExecute": "date"} + register: results +- name: Assert no updates + assert: + that: not results.changed + +- name: Create VM Extension (force update) + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: testVMExtension + virtual_machine_name: testVM + publisher: Microsoft.Azure.Extensions + virtual_machine_extension_type: CustomScript + type_handler_version: 2.0 + auto_upgrade_minor_version: true + settings: {"commandToExecute": "date"} + force_update_tag: true + register: results +- name: Assert updates + assert: + that: results.changed + +- name: List extensions + azure_rm_virtualmachineextension_info: + resource_group: "{{ resource_group }}" + virtual_machine_name: testVM + register: results +- name: Assert that facts are returned + assert: + that: + - results.changed == False + - results.extensions[0]['id'] != None + - results.extensions[0]['resource_group'] != None + - results.extensions[0]['virtual_machine_name'] != None + - results.extensions[0]['name'] != None + - results.extensions[0]['location'] != None + - results.extensions[0]['publisher'] != None + - results.extensions[0]['type'] != None + - results.extensions[0]['settings'] != None + - results.extensions[0]['auto_upgrade_minor_version'] != None + - results.extensions[0]['provisioning_state'] != None + +- name: Delete VM Extension + 
azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: testVMExtension + virtual_machine_name: testVM + state: absent + register: results +- name: Assert that VM Extension deleted + assert: + that: results.changed + +- name: Delete VM Extension (idempotent) + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: testVMExtension + virtual_machine_name: testVM + state: absent + register: results +- name: Assert no changes + assert: + that: not results.changed + +- name: Create VM Extension to configure python required for VM diagnostic extension + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: python-install + virtual_machine_name: testVM + publisher: Microsoft.Azure.Extensions + virtual_machine_extension_type: CustomScript + type_handler_version: 2.0 + auto_upgrade_minor_version: true + settings: {"commandToExecute": "apt-get update && apt-get install -y python2 && update-alternatives --install /usr/bin/python python /usr/bin/python2 1"} + register: results +- name: Assert that VM Extension ran + assert: + that: results.changed + +- name: Install VM Extension for diagnostics + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: "linux-diagnostics" + virtual_machine_name: testVM + publisher: "Microsoft.Azure.Diagnostics" + virtual_machine_extension_type: "LinuxDiagnostic" + type_handler_version: "4.0" + auto_upgrade_minor_version: true + settings: "{{ lookup('file', public_settings_file) }}" + protected_settings: "{{ lookup('file', protected_settings_file) }}" + register: results +- name: Assert extension installed + assert: + that: results.changed + +- name: Install VM Extension for diagnostics (idempotent) + azure_rm_virtualmachineextension: + resource_group: "{{ resource_group }}" + name: "linux-diagnostics" + virtual_machine_name: testVM + publisher: "Microsoft.Azure.Diagnostics" + virtual_machine_extension_type: "LinuxDiagnostic" + 
type_handler_version: "4.0" + auto_upgrade_minor_version: true + settings: "{{ lookup('file', public_settings_file) }}" + protected_settings: "{{ lookup('file', protected_settings_file) }}" + register: results +- name: Assert no updates + assert: + that: not results.changed + +- name: List extensions + azure_rm_virtualmachineextension_info: + resource_group: "{{ resource_group }}" + virtual_machine_name: testVM + register: results +- name: Assert that facts are returned + assert: + that: + - results.changed == False + - results.extensions | length >= 2 + - "'python-install' in results.extensions | map(attribute='name')" + - "'linux-diagnostics' in results.extensions | map(attribute='name')" + +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: testVM + state: absent + remove_on_absent: ['all'] + +- name: Delete a storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + type: Standard_LRS + state: absent + force_delete_nonempty: true + +- name: Delete Network Security Group that allows SSH + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: testNetworkSecurityGroup + state: absent + +- name: Delete virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: testVnet1 + state: absent + address_prefixes: "10.0.0.0/16" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/aliases new file mode 100644 index 000000000..9175999b4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group3 +shippable/azure/smoketest +destructive diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml new file mode 100644 index 000000000..88fd5ce19 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml @@ -0,0 +1,55 @@ +- name: set location + set_fact: + location: eastus + +- name: Get facts for a specific image + azure_rm_virtualmachineimage_info: + location: "{{ location }}" + publisher: OpenLogic + offer: CentOS + sku: '7.5' + version: '7.5.201808150' + register: output + +- assert: + that: output['vmimages'] | length == 1 + +- name: List available versions + azure_rm_virtualmachineimage_info: + location: "{{ location }}" + publisher: OpenLogic + offer: CentOS + sku: '7.5' + register: output + +- assert: + that: output['vmimages'] | length > 0 + +- name: List available offers + azure_rm_virtualmachineimage_info: + location: "{{ location }}" + publisher: OpenLogic + register: output + +- assert: + that: output['vmimages'] | length > 0 + +- name: List available publishers + azure_rm_virtualmachineimage_info: + location: "{{ location }}" + register: output + +- assert: + that: output['vmimages'] | length > 0 + +- name: Get facts for a specific image's latest version + azure_rm_virtualmachineimage_info: + location: "{{ location }}" + publisher: OpenLogic + offer: CentOS + sku: '7.5' + version: 
'latest' + register: output + +- assert: + that: output['vmimages'] | length == 1 \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/aliases new file mode 100644 index 000000000..557e95692 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/aliases @@ -0,0 +1,7 @@ +cloud/azure +shippable/azure/group6 +destructive +azure_rm_virtualmachinescaleset_facts +azure_rm_virtualmachinescalesetinstance_facts +azure_rm_virtualmachinescalesetextension +azure_rm_virtualmachinescalesetextension_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml new file mode 100644 index 000000000..d027146c2 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml @@ -0,0 +1,897 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: VMSStestVnet + address_prefixes: "10.0.0.0/16" + +- name: Add subnet + azure_rm_subnet: + 
resource_group: "{{ resource_group }}" + name: VMSStestSubnet + address_prefix: "10.0.1.0/24" + virtual_network: VMSStestVnet + +- name: Create public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: testPublicIP + +- name: Create load balancer + azure_rm_loadbalancer: + resource_group: "{{ resource_group }}" + name: testLB + public_ip_address_name: testPublicIP + +- name: Create public IP address 1 + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Static + name: testPublicIP1 + +- name: Create load balancer 1 + azure_rm_loadbalancer: + resource_group: "{{ resource_group }}" + name: testLB1 + public_ip_address_name: testPublicIP1 + +- name: Create network security group within same resource group of VMSS. + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: testNetworkSecurityGroup + +- name: Create network security group in different resource group of VMSS. + azure_rm_securitygroup: + resource_group: "{{ resource_group_secondary }}" + name: testNetworkSecurityGroup2 + +- name: Create virtual network inteface cards for VM A and B + azure_rm_networkinterface: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}nic" + virtual_network: VMSStestVnet + subnet: VMSStestSubnet + +- name: Create VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + 
vm_size: Standard_B1ms + network_interfaces: "vmforimage{{ rpfx }}nic" + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest +- name: Get VM facts + azure_rm_virtualmachine_info: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + register: vm_state +- name: Pause for 10 mimutes to VM updating + shell: sleep 600 +- name: Generalize VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + generalized: yes +- name: Create image A + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimagea + source: "vmforimage{{ rpfx }}" +- name: Create image B + azure_rm_image: + resource_group: "{{ resource_group }}" + name: testimageb + source: "vmforimage{{ rpfx }}" +- name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "vmforimage{{ rpfx }}" + state: absent + +- name: Create VMSS with I(orchestration_mode=Flexible) + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_A1_v2 + admin_username: testuser + single_placement_group: False + platform_fault_domain_count: 1 + public_ip_per_vm: True + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + managed_disk_type: Standard_LRS + orchestration_mode: Flexible + os_disk_caching: ReadWrite + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - 
lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + register: results + +- name: Assert that VMSS can be created + assert: + that: results.changed + +- name: Create VMSS with I(orchestration_mode=Flexible) again --- Idempotent + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_A1_v2 + admin_username: testuser + single_placement_group: False + platform_fault_domain_count: 1 + public_ip_per_vm: True + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + managed_disk_type: Standard_LRS + orchestration_mode: Flexible + os_disk_caching: ReadWrite + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + register: results + +- name: Assert that VMSS can be created + assert: + that: not results.changed + +- name: Retrieve scaleset VMSS fact + azure_rm_virtualmachinescaleset_info: + name: testVMSS{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output_scaleset + +- assert: + that: + - output_scaleset.vmss[0].properties.orchestrationMode == "Flexible" + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + state: absent + +- name: Create VMSS with Spot Instance default value + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group 
}}" + name: testVMSS{{ rpfx }} + vm_size: Standard_A1_v2 + admin_username: testuser + priority: Spot + eviction_policy: Deallocate + single_placement_group: True + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: 1 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + upgrade_policy: Manual + tier: Standard + managed_disk_type: Standard_LRS + os_disk_caching: ReadWrite + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + register: results + +- name: Assert that VMSS was created using Spot Instance default values + assert: + that: + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.priority == 'Spot' + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.evictionPolicy == 'Deallocate' + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.billingProfile.maxPrice == -1.0 + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + state: absent + +- name: Create VMSS with custom Spot Instance values + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_A1_v2 + admin_username: testuser + priority: Spot + eviction_policy: Delete + max_price: 1.0 + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: 1 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + upgrade_policy: Manual + tier: Standard + managed_disk_type: Standard_LRS + os_disk_caching: ReadWrite + single_placement_group: True + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + register: results + +- name: Assert that VMSS was created using Spot Instance custom values + assert: + that: + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.priority == 'Spot' + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.evictionPolicy == 'Delete' + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.billingProfile.maxPrice == 1.0 + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + state: absent + +- name: Create VMSS (check mode) + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_B1s + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: 1 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + load_balancer: testLB + upgrade_policy: Manual + tier: Standard + managed_disk_type: Standard_LRS + os_disk_caching: ReadWrite + single_placement_group: True + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + register: results + check_mode: yes + +- name: Assert that VMSS can be created + assert: + that: results.changed + +- name: Get VMSS to assert no VMSS is created in check mode + azure_rm_virtualmachinescaleset_info: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + format: curated + register: output_scaleset + +- name: Assert no VMSS created in check mode + assert: + that: + - output_scaleset.vmss | length == 0 + +- name: Create VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_B1s + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: 1 + virtual_network_name: VMSStestVnet + 
subnet_name: VMSStestSubnet + upgrade_policy: Manual + load_balancer: testLB + tier: Standard + managed_disk_type: Standard_LRS + os_disk_caching: ReadWrite + custom_data: "#cloud-config" + single_placement_group: True + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + scale_in_policy: "NewestVM" + register: results + +- name: Assert that VMSS was created + assert: + that: results.changed + +- name: Create VMSS -- test upgrade_policy idempotence and load balancer + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + vm_size: Standard_B1s + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: 1 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + upgrade_policy: Automatic + load_balancer: testLB1 + tier: Standard + managed_disk_type: Standard_LRS + os_disk_caching: ReadWrite + custom_data: "#cloud-config" + single_placement_group: True + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + data_disks: + - lun: 0 + disk_size_gb: 64 + caching: ReadWrite + managed_disk_type: Standard_LRS + register: results + +- name: Assert that VMSS was created + assert: + that: results.changed + +- name: Retrieve scaleset facts + azure_rm_virtualmachinescaleset_info: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + format: curated + register: 
output_scaleset + +- assert: + that: + - output_scaleset.vmss[0].load_balancer == "testLB1" + +- name: Retrieve scaleset VMs facts + azure_rm_virtualmachinescalesetinstance_info: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + register: instances + +- name: Assert that facts returned correctly + assert: + that: + - instances.instances | length == 1 + - instances.instances[0].id != None + - instances.instances[0].name != None + - instances.instances[0].instance_id != None + - instances.instances[0].provisioning_state != None + - instances.instances[0].vm_id != None + - instances.instances[0].latest_model != None + - instances.instances[0].power_state != None + +- name: Get scaleset body + set_fact: + body: "{{ output_scaleset.vmss[0] }}" + +- name: Try to update VMSS using output as input + azure_rm_virtualmachinescaleset: + resource_group: "{{ body.resource_group }}" + name: "{{ body.name }}" + vm_size: "{{ body.vm_size }}" + admin_username: "{{ body.admin_username }}" + ssh_password_enabled: "{{ body.ssh_password_enabled }}" + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + capacity: "{{ body.capacity }}" + virtual_network_name: "{{ body.virtual_network_name }}" + subnet_name: "{{ body.subnet_name }}" + upgrade_policy: "{{ body.upgrade_policy.mode }}" + load_balancer: "{{ body.load_balancer }}" + tier: "{{ body.tier }}" + managed_disk_type: "{{ body.managed_disk_type }}" + os_disk_caching: "{{ body.os_disk_caching }}" + image: "{{ body.image }}" + data_disks: "{{ body.data_disks }}" + overprovision: "{{ 
body.overprovision }}" + single_placement_group: True + register: results + +- name: Assert that nothing was changed + assert: + that: not results.changed + +- name: Install VMSS Extension + azure_rm_virtualmachinescalesetextension: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + name: testExtension + publisher: Microsoft.Azure.Extensions + type: CustomScript + type_handler_version: 2.0 + auto_upgrade_minor_version: true + settings: {"commandToExecute": "sudo apt-get -y install apache2"} + register: results + +- name: Assert that something was changed + assert: + that: results.changed + +- name: Install Again VMSS Extension - again + azure_rm_virtualmachinescalesetextension: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + name: testExtension + publisher: Microsoft.Azure.Extensions + type: CustomScript + type_handler_version: 2.0 + auto_upgrade_minor_version: true + settings: {"commandToExecute": "sudo apt-get -y install apache2"} + register: results + +- name: Assert that nothing was changed + assert: + that: not results.changed + +- name: Query extension + azure_rm_virtualmachinescalesetextension_info: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + name: testExtension + register: results + +- name: Assert that facts are returned + assert: + that: + - results.changed == False + - results.extensions[0]['id'] != None + - results.extensions[0]['resource_group'] != None + - results.extensions[0]['vmss_name'] != None + - results.extensions[0]['name'] != None + - results.extensions[0]['publisher'] != None + - results.extensions[0]['type'] != None + - results.extensions[0]['settings'] != None + - results.extensions[0]['auto_upgrade_minor_version'] != None + - results.extensions[0]['provisioning_state'] != None + +- name: List extensions + azure_rm_virtualmachinescalesetextension_info: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + register: results + +- name: 
Assert that facts are returned + assert: + that: + - results.changed == False + - results.extensions[0]['id'] != None + - results.extensions[0]['resource_group'] != None + - results.extensions[0]['vmss_name'] != None + - results.extensions[0]['name'] != None + - results.extensions[0]['publisher'] != None + - results.extensions[0]['type'] != None + - results.extensions[0]['settings'] != None + - results.extensions[0]['auto_upgrade_minor_version'] != None + - results.extensions[0]['provisioning_state'] != None + +- name: Delete VMSS Extension + azure_rm_virtualmachinescalesetextension: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + name: testExtension + state: absent + register: results + +- name: Assert that change was reported + assert: + that: results.changed + +- name: Upgrade instance to the latest image + azure_rm_virtualmachinescalesetinstance: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + instance_id: "{{ instances.instances[0].instance_id }}" + latest_model: yes + register: results + +- name: Assert that something has changed + assert: + that: results.changed + +- name: Stop virtual machine + azure_rm_virtualmachinescalesetinstance: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + instance_id: "{{ instances.instances[0].instance_id }}" + power_state: stopped + register: results + +- name: Assert that something has changed + assert: + that: results.changed + +- name: Delete instance + azure_rm_virtualmachinescalesetinstance: + resource_group: "{{ resource_group }}" + vmss_name: testVMSS{{ rpfx }} + instance_id: "{{ instances.instances[0].instance_id }}" + state: absent + register: results + +- name: Assert that something has changed + assert: + that: results.changed + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }} + state: absent + +- name: Create VMSS with security group in same resource group, with 
accelerated networking(check mode). + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}2 + vm_size: Standard_D3_v2 + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + image: + name: testimagea + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: testNetworkSecurityGroup + enable_accelerated_networking: yes + single_placement_group: True + register: results + check_mode: yes + +- name: Assert that VMSS can be created + assert: + that: results.changed + +- name: Create VMSS with security group in same resource group, with accelerated networking. 
+ azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}2 + vm_size: Standard_D3_v2 + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + image: + name: testimagea + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: testNetworkSecurityGroup + enable_accelerated_networking: yes + single_placement_group: True + register: results + +- name: Assert that VMSS ran + assert: + that: + - 'results.changed' + - 'results.ansible_facts.azure_vmss.properties.virtualMachineProfile.networkProfile.networkInterfaceConfigurations.0.properties.enableAcceleratedNetworking == true' + - 'results.ansible_facts.azure_vmss.properties.virtualMachineProfile.networkProfile.networkInterfaceConfigurations.0.properties.networkSecurityGroup != {}' + +- name: Create VMSS with security group in same resource group, with accelerated networking. 
+ azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}2 + vm_size: Standard_D3_v2 + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + image: + name: testimagea + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: testNetworkSecurityGroup + enable_accelerated_networking: yes + single_placement_group: True + register: results + +- name: Assert that nothing has changed + assert: + that: + - not results.changed + +- name: Create VMSS with security group in same resource group, with accelerated networking. 
+ azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}2 + vm_size: Standard_D3_v2 + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + image: + name: testimageb + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: testNetworkSecurityGroup + enable_accelerated_networking: yes + single_placement_group: True + register: results + +- name: Assert that something has changed + assert: + that: + - results.changed + +- name: update VMSS with security group in different resource group. 
+ azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}2 + vm_size: Standard_B1s + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + image: + name: testimageb + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: + name: testNetworkSecurityGroup2 + resource_group: "{{ resource_group_secondary }}" + single_placement_group: True + register: results + +# disable for now +#- name: Assert that security group is correct +# assert: +# that: +# - 'results.changed' +# - '"testNetworkSecurityGroup2" in results.ansible_facts.azure_vmss.properties.virtualMachineProfile.networkProfile.networkInterfaceConfigurations.0.properties.networkSecurityGroup.id' + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}2 + state: absent + +- name: Create VMSS with ephemeral OS disk. 
+ azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}3 + vm_size: Standard_DS2_v2 + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + ephemeral_os_disk: True + image: + name: testimageb + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: + name: testNetworkSecurityGroup2 + resource_group: "{{ resource_group_secondary }}" + single_placement_group: True + register: results + +- assert: + that: + - results.changed + - results.ansible_facts.azure_vmss.properties.virtualMachineProfile.storageProfile.osDisk.diffDiskSettings.option == 'Local' + +- name: Check VMSS ephemeral OS disk can't update + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}3 + vm_size: Standard_DS2_v2 + capacity: 0 + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + 
ephemeral_os_disk: False + image: + name: testimageb + resource_group: "{{ resource_group }}" + upgrade_policy: Manual + security_group: + name: testNetworkSecurityGroup2 + resource_group: "{{ resource_group_secondary }}" + single_placement_group: True + ignore_errors: yes + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}3 + state: absent + +- name: Fail when instance type is not supported to enable accelerated networking + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}4 + vm_size: Standard_B1s + virtual_network_name: VMSStestVnet + subnet_name: VMSStestSubnet + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + image: + offer: UbuntuServer + publisher: Canonical + sku: 18.04-LTS + version: latest + upgrade_policy: Manual + enable_accelerated_networking: yes + single_placement_group: True + register: results + ignore_errors: yes + +- name: Assert failure to show that accelerated networking is enabled only with supported instance types. 
+ assert: + that: + - '"VMSizeIsNotPermittedToEnableAcceleratedNetworkingForVmss" in results.msg' + +- name: Delete VMSS + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testVMSS{{ rpfx }}4 + state: absent + +- name: Delete network security group + azure_rm_securitygroup: + resource_group: "{{ resource_group }}" + name: testNetworkSecurityGroup + state: absent + +- name: Delete network security group + azure_rm_securitygroup: + resource_group: "{{ resource_group_secondary }}" + name: testNetworkSecurityGroup2 + state: absent + +- name: Delete load balancer + azure_rm_loadbalancer: + resource_group: "{{ resource_group }}" + name: testLB + state: absent + +- name: Delete public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + state: absent + name: testPublicIP + +- name: Delete virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: VMSStestVnet + state: absent + address_prefixes: "10.0.0.0/16" + +# TODO: Until we have a module to create/delete images this is the best tests +# I can do +- name: assert error thrown with invalid image dict + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testvm002 + vm_size: Standard_B1s + image: + offer: UbuntuServer + register: fail_invalid_image_dict + failed_when: 'fail_invalid_image_dict.msg != "parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]"' + +- name: assert error thrown with invalid image type + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testvm002 + vm_size: Standard_B1s + image: + - testing + register: fail_invalid_image_type + failed_when: 'fail_invalid_image_type.msg != "parameter error: expecting image to be a string or dict not list"' + +- name: assert error finding missing custom image + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testvm002 + 
vm_size: Standard_B1s + image: invalid-image + register: fail_missing_custom_image + failed_when: fail_missing_custom_image.msg != "Error could not find image with name invalid-image" + +- name: assert error finding missing custom image (dict style) + azure_rm_virtualmachinescaleset: + resource_group: "{{ resource_group }}" + name: testvm002 + vm_size: Standard_B1s + image: + name: invalid-image + register: fail_missing_custom_image_dict + failed_when: fail_missing_custom_image_dict.msg != "Error could not find image with name invalid-image" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/meta/main.yml new file mode 100644 index 000000000..48f5726d8 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/tasks/main.yml new file mode 100644 index 000000000..d3aad3f72 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinesize_info/tasks/main.yml @@ -0,0 +1,20 @@ +- name: set location + set_fact: + location: 
eastus + +- name: Get specific size information for a specific location + azure_rm_virtualmachinesize_info: + location: "{{ location }}" + name: Standard_A1_v2 + register: output + +- assert: + that: output['sizes'] | length == 1 + +- name: Get available sizes for a specific location + azure_rm_virtualmachinesize_info: + location: "{{ location }}" + register: output + +- assert: + that: output['sizes'] | length > 0 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/aliases new file mode 100644 index 000000000..aa77c071a --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group2 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/tasks/main.yml new file mode 100644 index 000000000..143396720 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetwork/tasks/main.yml @@ -0,0 +1,187 @@ +- name: Prepare random number + set_fact: + vnetname: "vnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Delete virtual network, if it exists + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + resource_group: "{{ resource_group }}" + state: 
absent + +- name: Create virtual network + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + - fdda:e69b:1587:495e::/64 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + +- name: Update virtual network with dns server + azure_rm_virtualnetwork: + flow_timeout_in_minutes: 8 + name: "{{ vnetname }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + - fdda:e69b:1587:495e::/64 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - "output.state.address_prefixes | length == 3" + - "output.state.flow_timeout_in_minutes == 8" + - "output.state.dns_servers | length == 2" + - "output.state.tags.delete == 'on-exit'" + - "output.state.tags | length == 2" + +- name: Attach a subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "{{ vnetname }}" + virtual_network_name: "{{ vnetname }}" + address_prefix_cidr: "10.1.0.0/24" + +- name: Gather facts by name, tags + azure_rm_virtualnetwork_info: + resource_group: "{{ resource_group }}" + name: "{{ vnetname }}" + tags: + - testing + register: facts + +- assert: + that: + - "facts.virtualnetworks | length == 1" + - "facts.virtualnetworks[0].dns_servers | length == 2" + - "facts.virtualnetworks[0].address_prefixes | length == 3" + - "facts.virtualnetworks[0].flow_timeout_in_minutes == 8" + - "facts.virtualnetworks[0].subnets | length == 1" + +- name: Gather facts by resource group, tags + azure_rm_virtualnetwork_info: + resource_group: "{{ resource_group }}" + tags: + - testing + register: facts + +- assert: + that: "facts.virtualnetworks | length >= 1" + +- name: Gather facts by tags + azure_rm_virtualnetwork_info: + tags: + - testing + register: facts + +- assert: + that: "facts.virtualnetworks | length >= 1" + +- name: Should be idempotent + azure_rm_virtualnetwork: + name: "{{ vnetname 
}}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + - fdda:e69b:1587:495e::/64 + dns_servers: + - 127.0.0.1 + - 127.0.0.3 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: not output.changed + +- name: Update tags + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + tags: + testing: 'no' + delete: never + foo: bar + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: output.state.tags | length == 3 + +- name: Purge tags + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + append_tags: no + tags: + testing: 'always' + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.state.tags | length == 1 + - output.state.tags.testing == 'always' + +- name: Should require address_prefixes_cidr when purge_address_prefixes + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + purge_address_prefixes: true + resource_group: "{{ resource_group }}" + register: output + ignore_errors: yes + +- assert: + that: output.failed + +- name: Purge address prefixes + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + address_prefixes_cidr: 10.1.0.0/16 + purge_address_prefixes: true + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: + - output.state.address_prefixes | length == 1 + - output.state.address_prefixes[0] == '10.1.0.0/16' + - output.state.dns_servers | length == 2 + - output.state.dns_servers[0] == '127.0.0.1' + +- name: Purge DNS servers + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + purge_dns_servers: true + resource_group: "{{ resource_group }}" + register: output + +- assert: + that: output.state['dns_servers'] is undefined + +- name: Gather facts + azure_rm_virtualnetwork_info: + resource_group: "{{ resource_group }}" + name: "{{ vnetname }}" + register: facts + +- assert: + that: + - facts.virtualnetworks | length == 1 + - "facts.virtualnetworks[0].subnets | length == 1" + +- name: 
Delete virtual network + azure_rm_virtualnetwork: + name: "{{ vnetname }}" + resource_group: "{{ resource_group }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/aliases new file mode 100644 index 000000000..6166f9737 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group14 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml new file mode 100644 index 000000000..9dbd87e69 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml @@ -0,0 +1,218 @@ +# FIXME: needs minimal tests (check mode?) 
that can run quickly, VNG creation takes > 20min + +- name: Prepare random number + set_fact: + vnetname: "vnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + vngname: "vng{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + pubipname: "testPublicIP{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Create virtual network gateway without bgp settings (check mode) + check_mode: yes + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "xyz" + register: output + +- assert: + that: output.changed + +- name: Create virtual network gateway Generation2 (check mode) + check_mode: yes + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + sku: VpnGw2 + vpn_gateway_generation: Generation2 + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "xyz" + +- assert: + that: output.changed + + +- name: long-running virtualnetworkgateway tests [run with `--tags long_run,untagged` to enable] + tags: [long_run, never] + block: + - name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vnetname }}" + address_prefixes: "10.0.0.0/16" + + - name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: GatewaySubnet + address_prefix: "10.0.2.0/24" + virtual_network: "{{ vnetname }}" + + - name: Create public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Dynamic + name: "{{ pubipname }}" + + - name: Create virtual network gateway without bgp settings + 
azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "xyz" + register: output + + - assert: + that: output.changed + + - name: Create virtual network gateway without bgp settings - idempotent + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "xyz" + register: output + + - assert: + that: not output.changed + + - name: Update virtual network gateway + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "mno" + register: output + - assert: + that: output.changed + + - name: Delete virtual network gateway + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + state: absent + register: output + - assert: + that: output.changed + +- name: long-running generation virtualnetworkgateway tests [run with `--tags long_run_gen,untagged` to enable] + tags: [long_run_gen, never] + block: + - name: Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vnetname }}" + address_prefixes: "10.0.0.0/16" + + - name: Add subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: GatewaySubnet + address_prefix: "10.0.2.0/24" + virtual_network: "{{ vnetname }}" + + - name: Create public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Dynamic + name: "{{ pubipname 
}}" + + - name: Create virtual network gateway w/ sku and Generation2 + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + sku: VpnGw2 + vpn_gateway_generation: Generation2 + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "xyz" + + - assert: + that: output.changed + + - name: Update virtual network gateway + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + sku: VpnGw2 + vpn_gateway_generation: Generation2 + ip_configurations: + - name: testipconfig + private_ip_allocation_method: Dynamic + public_ip_address_name: "{{ pubipname }}" + virtual_network: "{{ vnetname }}" + tags: + common: "mno" + + register: output + - assert: + that: output.changed + + - name: Delete virtual network gateway + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + state: absent + register: output + - assert: + that: output.changed + +- name: Delete virtual network gateway - idempotent + azure_rm_virtualnetworkgateway: + resource_group: "{{ resource_group }}" + name: "{{ vngname }}" + state: absent + register: output +- assert: + that: not output.changed + +# Clean up networking components after test +- name: Delete subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: GatewaySubnet + virtual_network: "{{ vnetname }}" + state: absent + + +- name: Delete public IP address + azure_rm_publicipaddress: + resource_group: "{{ resource_group }}" + allocation_method: Dynamic + name: "{{ pubipname }}" + state: absent + +- name: Delete virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ vnetname }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/aliases 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/aliases new file mode 100644 index 000000000..5afc4b6f3 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/aliases @@ -0,0 +1,4 @@ +cloud/azure +shippable/azure/group14 +unsupported +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml new file mode 100644 index 000000000..bf7a4aa13 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml @@ -0,0 +1,126 @@ +- name: Prepare random number + set_fact: + vnetname1: "vnet1{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + vnetname2: "vnet2{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + peering_name: "peering1{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 100 | random }}" + +- name: Create first virtual network + azure_rm_virtualnetwork: + name: "{{ vnetname1 }}" + address_prefixes_cidr: + - 10.1.0.0/16 + tags: + testing: testing + delete: on-exit + resource_group: "{{ resource_group }}" + register: vnet1 + +- name: Create second virtual network + azure_rm_virtualnetwork: + name: "{{ vnetname2 }}" + address_prefixes_cidr: + - 10.2.0.0/24 + resource_group: "{{ 
resource_group_secondary }}" + register: vnet2 + +- assert: + that: + - vnet1.changed + - vnet2.changed + +- name: Create virtual network peering (check mode) + azure_rm_virtualnetworkpeering: + resource_group: "{{ resource_group }}" + name: "{{ peering_name }}" + virtual_network: "{{ vnetname1 }}" + remote_virtual_network: + resource_group: "{{ resource_group_secondary }}" + name: "{{ vnetname2 }}" + allow_virtual_network_access: false + allow_forwarded_traffic: true + check_mode: yes + register: output + +- assert: + that: output.changed + +- name: Create virtual network peering + azure_rm_virtualnetworkpeering: + resource_group: "{{ resource_group }}" + name: "{{ peering_name }}" + virtual_network: "{{ vnetname1 }}" + remote_virtual_network: + resource_group: "{{ resource_group_secondary }}" + name: "{{ vnetname2 }}" + allow_virtual_network_access: false + allow_forwarded_traffic: true + register: output + +- assert: + that: output.changed + +- name: Update virtual network peering (idempotent) + azure_rm_virtualnetworkpeering: + resource_group: "{{ resource_group }}" + name: "{{ peering_name }}" + virtual_network: "{{ vnetname1 }}" + remote_virtual_network: + resource_group: "{{ resource_group_secondary }}" + name: "{{ vnetname2 }}" + allow_virtual_network_access: false + allow_forwarded_traffic: true + register: output + +- assert: + that: not output.changed + +- name: Update virtual network peering + azure_rm_virtualnetworkpeering: + resource_group: "{{ resource_group }}" + name: "{{ peering_name }}" + virtual_network: "{{ vnetname1 }}" + remote_virtual_network: + resource_group: "{{ resource_group_secondary }}" + name: "{{ vnetname2 }}" + allow_virtual_network_access: true + allow_forwarded_traffic: false + register: output + +- assert: + that: output.changed + +- name: Get facts + azure_rm_virtualnetworkpeering_info: + resource_group: "{{ resource_group }}" + name: "{{ peering_name }}" + virtual_network: "{{ vnetname1 }}" + register: facts + +- name: Assert 
Facts + assert: + that: + - facts['vnetpeerings'] | length == 1 + - facts['vnetpeerings'][0]['id'] + - facts['vnetpeerings'][0]['peering_state'] + - facts['vnetpeerings'][0]['remote_virtual_network'] + - facts['vnetpeerings'][0]['provisioning_state'] + +- name: Delete virtual network peering + azure_rm_virtualnetworkpeering: + resource_group: "{{ resource_group }}" + name: "{{ peering_name }}" + virtual_network: "{{ vnetname1 }}" + state: absent + register: output + +- name: Delete first virtual network + azure_rm_virtualnetwork: + name: "{{ vnetname1 }}" + resource_group: "{{ resource_group }}" + state: absent + +- name: Delete virtual network + azure_rm_virtualnetwork: + name: "{{ vnetname2 }}" + resource_group: "{{ resource_group_secondary }}" + state: absent \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/aliases new file mode 100644 index 000000000..5d29c6c4d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group10 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/tasks/main.yml new file mode 100644 index 000000000..c53b7d513 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualwan/tasks/main.yml @@ -0,0 +1,61 @@ + - name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + + - name: Create a VirtualWan + azure_rm_virtualwan: + resource_group: "{{ resource_group }}" + name: "virtualwan{{ rpfx }}" + office365_local_breakout_category: All + disable_vpn_encryption: true + allow_branch_to_branch_traffic: true + allow_vnet_to_vnet_traffic: true + virtual_wan_type: Basic + register: output + + - assert: + that: + - output.changed + + - name: Create a VirtualWan again (Idempotent test) + azure_rm_virtualwan: + resource_group: "{{ resource_group }}" + name: "virtualwan{{ rpfx }}" + register: output + + - assert: + that: + - not output.changed + + - name: Update the VirtualWan + azure_rm_virtualwan: + resource_group: "{{ resource_group }}" + office365_local_breakout_category: All + name: "virtualwan{{ rpfx }}" + disable_vpn_encryption: false + allow_branch_to_branch_traffic: true + allow_vnet_to_vnet_traffic: true + virtual_wan_type: Basic + register: output + + - assert: + that: + - output.changed + + - name: Get VirtualWan info + azure_rm_virtualwan_info: + resource_group: "{{ resource_group }}" + name: "virtualwan{{ rpfx }}" + register: output + + - assert: + that: + - output.virtual_wans[0]['allow_branch_to_branch_traffic'] == true + - output.virtual_wans[0]['disable_vpn_encryption'] == false + + - name: Delete the VirtualWan + azure_rm_virtualwan: + resource_group: "{{ resource_group }}" + name: "virtualwan{{ rpfx }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/aliases new file mode 100644 index 000000000..cc941b59c --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group12 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/tasks/main.yml new file mode 100644 index 000000000..9eb906f90 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vmbackuppolicy/tasks/main.yml @@ -0,0 +1,78 @@ +- name: Fix resource prefix + set_fact: + name: "myPolicy{{ resource_group | hash('md5') | truncate(22, True, '') }}" + vault_name: "myVault" + location: "eastus" + +- name: Create Azure Recovery Service vault + azure_rm_recoveryservicesvault: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + location: "{{ location }}" + state: "present" + register: vault + +- name: Create VM Backup Policy + azure_rm_vmbackuppolicy: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + vault_name: "{{ vault.response.name }}" + state: "present" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Create VM Backup Policy (idempotent) + azure_rm_vmbackuppolicy: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + vault_name: "{{ vault.response.name }}" + state: "present" + register: output + +- name: Assert that output has no changed + assert: + that: + - not output.changed + +- name: Get VM 
Backup Policy Details + azure_rm_vmbackuppolicy_info: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + vault_name: "{{ vault.response.name }}" + register: output + +- name: Assert that output has changed + assert: + that: + - output.response.id != None + - output.response.name != None + +- name: Delete VM Backup Policy + azure_rm_vmbackuppolicy: + resource_group: "{{ resource_group }}" + name: "{{ name }}" + vault_name: "{{ vault.response.name }}" + state: "absent" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed + +- name: Delete Azure Recovery Service vault + azure_rm_recoveryservicesvault: + resource_group: "{{ resource_group }}" + name: "{{ vault.response.name }}" + location: "{{ location }}" + state: "absent" + register: output + +- name: Assert that output has changed + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/aliases new file mode 100644 index 000000000..8767e0189 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/aliases @@ -0,0 +1,3 @@ +cloud/azure +destructive +shippable/azure/group10 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/tasks/main.yml new file mode 100644 index 000000000..07cb7dd45 --- /dev/null +++ 
b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_vpnsite/tasks/main.yml @@ -0,0 +1,102 @@ + - name: Create vpn site name + set_fact: + vpn_site: "vpn{{ resource_group | hash('md5') | truncate(22, True, '') }}" + + - name: Create a VpnSite + azure_rm_vpnsite: + resource_group: "{{ resource_group }}" + name: "{{ vpn_site }}" + ip_address: 10.0.0.0 + is_security_site: true + device_properties: + device_vendor: 10.0.0.23 + device_model: test_model + link_speed_in_mbps: 10 + address_space: + address_prefixes: + - 10.0.0.0/16 + bgp_properties: + asn: 1234 + bgp_peering_address: 192.168.0.0 + o365_policy: + break_out_categories: + allow: false + optimize: false + default: false + register: output + + - assert: + that: + - output.changed + + - name: Create the VpnSite without change (Idempotent test) + azure_rm_vpnsite: + resource_group: "{{ resource_group }}" + name: "{{ vpn_site }}" + ip_address: 10.0.0.0 + is_security_site: true + device_properties: + device_vendor: 10.0.0.23 + device_model: test_model + link_speed_in_mbps: 10 + address_space: + address_prefixes: + - 10.0.0.0/16 + bgp_properties: + asn: 1234 + bgp_peering_address: 192.168.0.0 + o365_policy: + break_out_categories: + allow: false + optimize: false + default: false + register: output + + - assert: + that: + - not output.changed + + - name: Update the VpnSite's device properties + azure_rm_vpnsite: + resource_group: "{{ resource_group }}" + name: "{{ vpn_site }}" + ip_address: 10.0.0.0 + is_security_site: true + device_properties: + device_vendor: 10.0.0.23 + device_model: test_model + link_speed_in_mbps: 100 + address_space: + address_prefixes: + - 10.0.0.0/16 + bgp_properties: + asn: 1234 + bgp_peering_address: 192.168.0.0 + o365_policy: + break_out_categories: + allow: false + optimize: false + default: false + register: output + + - assert: + that: + - output.changed + + - name: Get the VpnSite info + azure_rm_vpnsite_info: + resource_group: "{{ resource_group }}" + name: "{{ 
vpn_site }}" + register: output + + - assert: + that: + - "output.vpn_sites[0].is_security_site == true" + - "output.vpn_sites[0].ip_address == '10.0.0.0'" + - "output.vpn_sites[0].device_properties.link_speed_in_mbps == 100" + + - name: Delete the VpnSite + azure_rm_vpnsite: + resource_group: "{{ resource_group }}" + name: "{{ vpn_site }}" + state: absent diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/aliases new file mode 100644 index 000000000..c7c2aff0c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group3 +destructive +azure_rm_webapp_facts +azure_rm_webappslot diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml new file mode 100644 index 000000000..b286bfe13 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml @@ -0,0 +1,537 @@ +- name: Prepare facts + set_fact: + resource_prefix: "{{ resource_group_secondary | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: yes + +- name: Fix resource prefix + set_fact: + linux_app_plan_resource_group: "{{ resource_group_secondary }}" + win_app_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}{{ 1000 | random}}winapp" + linux_app_name: 
"{{ (resource_prefix | replace('-','x'))[-8:] }}{{ 1000 | random}}linuxapp" + win_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}winplan" + linux_plan_name: "{{ (resource_group_secondary | replace('-','x'))[-8:] }}linplan" + slot1_name: "stage1" + +- name: Create a windows web app with non-exist app service plan + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}1" + plan: + resource_group: "{{ resource_group }}" + name: "{{ win_plan_name }}" + is_linux: false + sku: S1 + +- name: Create a windows web app with existing app service plan + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}2" + plan: "{{ win_plan_name }}" + register: output + +- name: stop the web app + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}2" + plan: "{{ win_plan_name }}" + app_state: stopped + register: output + +- name: assert output changed + assert: + that: + output.changed + +- name: get the web app + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}2" + register: stopped + +- name: assert web app is stopped + assert: + that: + - stopped.webapps[0].app_state == "Stopped" + +- name: Create a windows web app with existing app service plan, try to update some root level params + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}3" + plan: "{{ win_plan_name }}" + https_only: true + tags: + testwebapptag: test + register: output + +- name: get web app with resource group and tag + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}3" + tags: + - testwebapptag + register: output + +- assert: + that: + - output.webapps | length == 1 + +- name: Create a win web app with java run time specific + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}4" + plan: "{{ win_plan_name }}" + frameworks: + - name: "java" + version: "1.8" + 
settings: + java_container: "Tomcat" + java_container_version: "8.0" + app_settings: + testkey: "testvalue" + register: output + +- name: assert the web app was created + assert: + that: output.changed + +- name: get web app with name + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}4" + register: output + +- assert: + that: + - output.webapps | length == 1 + - output.webapps[0].app_settings | length == 1 + - output.webapps[0].frameworks | length > 1 # there's default frameworks eg net_framework + +- name: Update app settings and framework + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}4" + plan: "{{ win_plan_name }}" + frameworks: + - name: "java" + version: "1.7" + settings: + java_container: "Tomcat" + java_container_version: "8.5" + app_settings: + testkey2: "testvalue2" + register: output + +- name: Assert the web app was updated + assert: + that: + - output.changed + +- name: get web app with name + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}4" + register: output + +- name: Assert updating + assert: + that: + - output.webapps[0].app_settings | length == 2 + - output.webapps[0].app_settings['testkey'] == 'testvalue' + - output.webapps[0].app_settings['testkey2'] == 'testvalue2' + +- name: get web app with return publishing profile + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}4" + return_publish_profile: true + register: output + +- assert: + that: + - output.webapps | length == 1 + - output.webapps[0].publishing_username != "" + - output.webapps[0].publishing_password != "" + +- name: Purge all existing app settings + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}4" + plan: "{{ win_plan_name }}" + purge_app_settings: true + register: output + +- name: Assert the web app was updated + assert: + that: output.changed + +- name: Create a win web app 
with python run time and php run time + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}5" + plan: "{{ win_plan_name }}" + frameworks: + - name: "python" + version: "2.7" + - name: node + version: "6.6" + - name: "php" + version: "7.0" + register: output + +- name: Assert the web app was created + assert: + that: output.changed + +- name: Create a docker web app with some app settings + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}6" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + is_linux: true + sku: S1 + number_of_workers: 1 + container_settings: + name: "ansible/ansible:ubuntu1404" + register: output + +- name: Assert the web app was created + assert: + that: output.changed + +- name: Create a docker web app with private acr registry + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}7" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + container_settings: + name: "ansible/ansible:ubuntu1404" + registry_server_url: test.io + registry_server_user: user + registry_server_password: password + register: output + +- name: Assert the web app was created + assert: + that: output.changed + +- name: Create a linux web app with nodejs framework + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}8" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + frameworks: + - name: node + version: "6.6" + register: output + +- name: Should be idempotent with linux web app created + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}8" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + frameworks: + - name: node + version: "6.6" + register: output + +- assert: + that: not output.changed + +- name: Update nodejs 
framework + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}8" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + frameworks: + - name: node + version: "6.9" + register: output + +- name: Assert the web app was created + assert: + that: output.changed + +- name: Create a linux web app with deployment source github + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}10" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + deployment_source: + url: "https://github.com/test/test" + branch: master + scm_type: GitHub + register: output + +- name: Assert the web app was created + assert: + that: output.changed + +- name: Delete web app + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}8" + state: absent + register: output + +- name: Assert the web app was deleted + assert: + that: output.changed + +- name: assert error that java is mutually exclusive with frameworks + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}11" + plan: "{{ win_plan_name }}" + frameworks: + - name: "python" + version: "2.7" + - name: "java" + version: "1.8" + register: fail_win_java_version_mutual_exclusive + failed_when: 'fail_win_java_version_mutual_exclusive.msg != "Java is mutually exclusive with other frameworks."' + +- name: assert error when linux web app, only can specify one framework + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ linux_plan_name }}12" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + frameworks: + - name: "python" + version: "2.7" + - name: "node" + version: "6.6" + register: fail_linux_one_framework_only + failed_when: fail_linux_one_framework_only.msg != "Can specify one framework only for Linux web app." 
+ +- name: Create a linux web app with java tomcat container + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}13" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_plan_name }}" + frameworks: + - name: java + version: "8" + settings: + java_container: "tomcat" + java_container_version: "8.5" + register: output + +- name: Assert the web app was created + assert: + that: output.changed + +- name: Get facts with publish profile + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ win_app_name }}13" + no_log: true + register: facts + +- name: Assert publish profile returned + assert: + that: + - facts.webapps[0].ftp_publish_url != '' + +- name: Create a web app with various site config params + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ linux_app_name }}-siteconfig" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_app_name }}-siteconfig-plan" + is_linux: true + sku: S1 + frameworks: + - name: java + version: "8" + settings: + java_container: "tomcat" + java_container_version: "8.5" + client_affinity_enabled: false + https_only: true + always_on: true + min_tls_version: "1.2" + ftps_state: "Disabled" + register: output +- name: Assert the web app was created + assert: + that: output.changed + +- name: Create a web app with various site config params - idempotent + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ linux_app_name }}-siteconfig" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_app_name }}-siteconfig-plan" + is_linux: true + sku: S1 + frameworks: + - name: java + version: "8" + settings: + java_container: "tomcat" + java_container_version: "8.5" + client_affinity_enabled: false + https_only: true + always_on: true + min_tls_version: "1.2" + ftps_state: "Disabled" + register: output +- name: Assert the web app not changed + assert: + that: not 
output.changed + +- name: Get facts for site config params + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ linux_app_name }}-siteconfig" + register: facts +- name: Assert site config params meet expectations + assert: + that: + - facts.webapps[0].always_on + - facts.webapps[0].min_tls_version == '1.2' + - facts.webapps[0].ftps_state == 'Disabled' + +- name: Update web app with various site config params - single change + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: "{{ linux_app_name }}-siteconfig" + plan: + resource_group: "{{ linux_app_plan_resource_group }}" + name: "{{ linux_app_name }}-siteconfig-plan" + is_linux: true + sku: S1 + frameworks: + - name: java + version: "8" + settings: + java_container: "tomcat" + java_container_version: "8.5" + client_affinity_enabled: false + https_only: true + always_on: true + min_tls_version: "1.2" + ftps_state: "FtpsOnly" + register: output +- name: Assert the web app was updated + assert: + that: output.changed + +- name: Get facts for site config params + azure_rm_webapp_info: + resource_group: "{{ resource_group }}" + name: "{{ linux_app_name }}-siteconfig" + register: facts +- name: Assert site config params meet expectations + assert: + that: + - facts.webapps[0].always_on + - facts.webapps[0].min_tls_version == '1.2' + - facts.webapps[0].ftps_state == 'FtpsOnly' + +- name: Create a webapp slot (Check mode) + azure_rm_webappslot: + resource_group: "{{ resource_group }}" + webapp_name: "{{ win_app_name }}13" + name: "{{ slot1_name }}" + configuration_source: "{{ win_app_name }}13" + app_settings: + testkey: testvalue + check_mode: yes + register: output + +- name: Assert slot check mode creation + assert: + that: + - output.changed + +- name: Create a webapp slot + azure_rm_webappslot: + resource_group: "{{ resource_group }}" + webapp_name: "{{ win_app_name }}13" + name: "{{ slot1_name }}" + configuration_source: "{{ win_app_name }}13" + app_settings: + testkey: 
testvalueslot + register: output + +- name: Assert slot creation + assert: + that: + - output.changed + +- name: Update webapp slot (idempotence) + azure_rm_webappslot: + resource_group: "{{ resource_group }}" + webapp_name: "{{ win_app_name }}13" + name: "{{ slot1_name }}" + app_settings: + testkey: testvalueslot + register: output + +- name: Assert idempotence + assert: + that: + - not output.changed + +- name: Update webapp slot + azure_rm_webappslot: + resource_group: "{{ resource_group }}" + webapp_name: "{{ win_app_name }}13" + name: "{{ slot1_name }}" + frameworks: + - name: "node" + version: "10.1" + app_settings: + testkey: testvalue2 + register: output + +- name: Assert updating + assert: + that: + - output.changed + +- name: Swap webapp slot + azure_rm_webappslot: + resource_group: "{{ resource_group }}" + webapp_name: "{{ win_app_name }}13" + name: "{{ slot1_name }}" + swap: + action: swap + register: output + +- name: Assert swap + assert: + that: + - output.changed + +- name: Stop webapp slot + azure_rm_webappslot: + resource_group: "{{ resource_group }}" + webapp_name: "{{ win_app_name }}13" + name: "{{ slot1_name }}" + app_state: stopped + register: output + +- name: Assert stopped + assert: + that: + - output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/meta/main.yml new file mode 100644 index 
000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml new file mode 100644 index 000000000..05061d195 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml @@ -0,0 +1,268 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: true + +- name: Create a web app + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: webapp{{ rpfx }} + plan: + resource_group: "{{ resource_group }}" + name: webappplan{{ rpfx }} + is_linux: false + sku: S1 + +- name: "Create webapp access restriction - check mode" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Allow" + ip_address: "1.1.1.1/24" + priority: 1 + - name: "Datacenter 2" + action: "Allow" + ip_address: "2.2.2.2/24" + priority: 2 + scm_ip_security_restrictions_use_main: true + register: output + check_mode: true +- name: Assert the resource is well created + assert: + that: output.changed + +- name: "Create webapp access restriction" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Allow" + ip_address: "1.1.1.1/24" + priority: 1 + - name: "Datacenter 2" + action: "Allow" + ip_address: "2.2.2.2/24" + priority: 2 + scm_ip_security_restrictions_use_main: true + register: output +- name: Assert the resource is well created + assert: + that: + 
- output.changed + - output.ip_security_restrictions | length == 2 + - output.ip_security_restrictions[0].action == 'Allow' + - output.ip_security_restrictions[0].ip_address == '1.1.1.1/24' + - output.ip_security_restrictions[1].action == 'Allow' + - output.ip_security_restrictions[1].ip_address == '2.2.2.2/24' + - output.scm_ip_security_restrictions_use_main == true + +- name: "Check webapp access restriction facts 1" + azure_rm_webappaccessrestriction_info: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output +- name: Assert restrictions + assert: + that: + - not output.changed + - output.ip_security_restrictions | length == 2 + - output.ip_security_restrictions[0].action == 'Allow' + - output.ip_security_restrictions[0].ip_address == '1.1.1.1/24' + - output.ip_security_restrictions[1].action == 'Allow' + - output.ip_security_restrictions[1].ip_address == '2.2.2.2/24' + - output.scm_ip_security_restrictions_use_main == true + +- name: "Create webapp access restriction - idempotent" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Allow" + ip_address: "1.1.1.1/24" + priority: 1 + - name: "Datacenter 2" + action: "Allow" + ip_address: "2.2.2.2/24" + priority: 2 + scm_ip_security_restrictions_use_main: true + register: output +- name: Assert the resource is not changed + assert: + that: not output.changed + +- name: "Delete specific webapp access restriction" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Allow" + ip_address: "1.1.1.1/24" + priority: 1 + scm_ip_security_restrictions_use_main: true + register: output +- name: Assert the resource is updated + assert: + that: + - output.changed + - output.ip_security_restrictions | length == 1 + - output.ip_security_restrictions[0].action == 'Allow' + - 
output.ip_security_restrictions[0].ip_address == '1.1.1.1/24' + - output.scm_ip_security_restrictions_use_main == true + +- name: "Update existing webapp access restriction 1" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + scm_ip_security_restrictions_use_main: true + register: output +- name: Assert the resource is updated + assert: + that: + - output.changed + - output.ip_security_restrictions | length == 1 + - output.ip_security_restrictions[0].action == 'Deny' + - output.ip_security_restrictions[0].ip_address == '1.2.3.4/24' + - output.scm_ip_security_restrictions_use_main == true + +- name: "Update existing webapp access restriction 1" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + scm_ip_security_restrictions_use_main: false + register: output +- name: Assert the resource is updated + assert: + that: + - output.changed + - output.ip_security_restrictions | length == 1 + - output.ip_security_restrictions[0].action == 'Deny' + - output.ip_security_restrictions[0].ip_address == '1.2.3.4/24' + - output.scm_ip_security_restrictions_use_main == false + +- name: "Update existing webapp access restriction 3" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + scm_ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + scm_ip_security_restrictions_use_main: false + register: output +- name: Assert the resource is updated + assert: + that: + - output.changed + - output.ip_security_restrictions | length == 1 + - 
output.ip_security_restrictions[0].action == 'Deny' + - output.ip_security_restrictions[0].ip_address == '1.2.3.4/24' + - output.scm_ip_security_restrictions | length == 1 + - output.scm_ip_security_restrictions[0].action == 'Deny' + - output.scm_ip_security_restrictions[0].ip_address == '1.2.3.4/24' + - output.scm_ip_security_restrictions_use_main == false + +- name: "Update existing webapp access restriction 4" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + scm_ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + - name: "Datacenter 2" + action: "Allow" + ip_address: "2.2.2.2/24" + priority: 2 + scm_ip_security_restrictions_use_main: false + register: output +- name: Assert the resource is updated + assert: + that: + - output.changed + - output.ip_security_restrictions | length == 1 + - output.ip_security_restrictions[0].action == 'Deny' + - output.ip_security_restrictions[0].ip_address == '1.2.3.4/24' + - output.scm_ip_security_restrictions | length == 2 + - output.scm_ip_security_restrictions[0].action == 'Deny' + - output.scm_ip_security_restrictions[0].ip_address == '1.2.3.4/24' + - output.scm_ip_security_restrictions[1].action == 'Allow' + - output.scm_ip_security_restrictions[1].ip_address == '2.2.2.2/24' + - output.scm_ip_security_restrictions_use_main == false + +- name: "Update existing webapp access restriction - idempotent" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + scm_ip_security_restrictions: + - name: "Datacenter 1" + action: "Deny" + ip_address: "1.2.3.4/24" + priority: 1 + - name: "Datacenter 2" + action: "Allow" + ip_address: "2.2.2.2/24" + priority: 2 + 
scm_ip_security_restrictions_use_main: false + register: output +- name: Assert the resource is not changed + assert: + that: not output.changed + +- name: "Delete webapp access restrictions" + azure_rm_webappaccessrestriction: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: Assert the resource is deleted + assert: + that: + - output.changed + - output.ip_security_restrictions | length == 0 + - output.scm_ip_security_restrictions | length == 0 + - output.scm_ip_security_restrictions_use_main == false + +- name: "Check webapp access restriction facts 3" + azure_rm_webappaccessrestriction_info: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output +- name: Assert no restrictions + assert: + that: + - not output.changed + - output.ip_security_restrictions | length <= 1 + - output.scm_ip_security_restrictions | length <= 1 + - output.scm_ip_security_restrictions_use_main == false diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/aliases new file mode 100644 index 000000000..759eafa2d --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/aliases @@ -0,0 +1,3 @@ +cloud/azure +shippable/azure/group3 +destructive diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/tasks/main.yml new file mode 100644 index 000000000..66a1b55cf --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappvnetconnection/tasks/main.yml @@ -0,0 +1,129 @@ +- name: Prepare random number + set_fact: + rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + run_once: true + +- name: Create a virtual network + azure_rm_virtualnetwork: + name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + address_prefixes_cidr: + - 10.1.0.0/16 + - 172.100.0.0/16 + dns_servers: + - 127.0.0.1 + - 127.0.0.2 +- name: Create a subnet + azure_rm_subnet: + name: subnet{{ rpfx }} + virtual_network_name: vnet{{ rpfx }} + resource_group: "{{ resource_group }}" + address_prefix_cidr: 10.1.0.0/24 + delegations: + - name: 'mydeleg' + serviceName: 'Microsoft.Web/serverFarms' + register: subnet_output +- name: Create a web app + azure_rm_webapp: + resource_group: "{{ resource_group }}" + name: webapp{{ rpfx }} + plan: + resource_group: "{{ resource_group }}" + name: webappplan{{ rpfx }} + is_linux: false + sku: S1 + +- name: "Create webapp vnetconnection - check mode" + azure_rm_webappvnetconnection: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + vnet_name: vnet{{ rpfx }} + subnet: subnet{{ rpfx }} + check_mode: true + register: output +- name: Assert the resource is well created + assert: + that: output.changed + +- name: "Check webapp vnetconnection facts 1" + azure_rm_webappvnetconnection_info: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output +- name: Assert the resource has no connections + assert: + that: + - not output.changed + - output.connection | length == 0 + +- name: "Create webapp vnetconnection" + azure_rm_webappvnetconnection: 
+ name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + vnet_name: vnet{{ rpfx }} + subnet: subnet{{ rpfx }} + register: output +- name: Assert the resource is well created + assert: + that: + - output.changed + - output.connection.vnet_name == 'vnet{{ rpfx }}' + - output.connection.subnet_name == 'subnet{{ rpfx }}' + - output.connection.vnet_resource_group == '{{ resource_group }}' + +- name: "Check webapp vnetconnection facts 2" + azure_rm_webappvnetconnection_info: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output +- name: Assert the connection exists + assert: + that: + - not output.changed + - output.connection.vnet_name == 'vnet{{ rpfx }}' + - output.connection.subnet_name == 'subnet{{ rpfx }}' + - output.connection.vnet_resource_group == '{{ resource_group }}' + +- name: "Create webapp vnetconnection - idempotent" + azure_rm_webappvnetconnection: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + vnet_name: vnet{{ rpfx }} + subnet: subnet{{ rpfx }} + register: output +- name: Assert the resource is not changed + assert: + that: not output.changed + +- name: "Delete webapp vnetconnection" + azure_rm_webappvnetconnection: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: Assert the connection is deleted + assert: + that: + - output.changed + - output.connection | length == 0 + +- name: "Check webapp vnetconnection facts 3" + azure_rm_webappvnetconnection_info: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + register: output +- name: Assert the resource has no connections + assert: + that: + - not output.changed + - output.connection | length == 0 + +- name: "Delete webapp vnetconnection - idempotent" + azure_rm_webappvnetconnection: + name: webapp{{ rpfx }} + resource_group: "{{ resource_group }}" + state: "absent" + register: output +- name: Assert the resource is not changed + assert: + that: not output.changed 
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/aliases new file mode 100644 index 000000000..15133fe63 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/aliases @@ -0,0 +1,5 @@ +cloud/azure +shippable/azure/group4 +destructive +azure_rm_workspace +azure_rm_workspace_facts diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/meta/main.yml new file mode 100644 index 000000000..95e1952f9 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - setup_azure diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/tasks/main.yml new file mode 100644 index 000000000..7f9a05f79 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_workspace/tasks/main.yml @@ -0,0 +1,128 @@ +- name: Prepare random number + set_fact: + name: "workspace{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}" + +- name: Create workspace (check mode) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + retention_in_days: 40 + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Get workspace + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - facts.workspaces | length == 0 + +- name: Create workspace + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + 
retention_in_days: 40 + register: output + +- assert: + that: + - output.retention_in_days == 40 + - output.changed + - output.intelligence_packs + +- name: Create workspace (idempotent) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + retention_in_days: 40 + register: output + +- assert: + that: + - not output.changed + +- name: Get workspace + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - facts.workspaces | length == 1 + - facts.workspaces[0].id == output.id + +- set_fact: + pack: "{{ pack | default({}) | combine({output.intelligence_packs[0].name: not output.intelligence_packs[0].enabled}) }}" + +- name: Update intelligence pack + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + intelligence_packs: "{{ pack }}" + register: intelligence + +- assert: + that: + - intelligence.intelligence_packs[0].enabled != output.intelligence_packs[0].enabled + +- name: Remove workspace (check mode) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + check_mode: yes + register: output + +- assert: + that: + - output.changed + +- name: Get workspace + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - facts.workspaces | length == 1 + +- name: Remove workspace + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + state: absent + register: output + +- assert: + that: + - output.changed + +- name: Get workspace + azure_rm_loganalyticsworkspace_info: + name: "{{ name }}" + resource_group: "{{ resource_group }}" + register: facts + +- assert: + that: + - facts.workspaces | length == 0 + +- name: Remove workspace (idempotent) + azure_rm_loganalyticsworkspace: + name: "{{ name }}" + resource_group: "{{ 
resource_group }}" + state: absent + register: output + +- assert: + that: + - not output.changed diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/aliases new file mode 100644 index 000000000..e42dbab0e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/aliases @@ -0,0 +1,2 @@ +cloud/azure +shippable/azure/group1 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/create_inventory_config.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/create_inventory_config.yml new file mode 100644 index 000000000..6ed4067af --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/create_inventory_config.yml @@ -0,0 +1,11 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + vars: + template_name: "../templates/{{ template | default('basic.yml') }}" + tasks: + - name: write inventory config file + copy: + dest: ../test.azure_rm.yml + content: "{{ lookup('template', template_name) }}" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/empty_inventory_config.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/empty_inventory_config.yml new file mode 100644 index 000000000..06a427931 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/empty_inventory_config.yml @@ -0,0 +1,9 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: write inventory config file + copy: + dest: ../test.azure_rm.yml + content: "" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml 
b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml new file mode 100644 index 000000000..a8358089f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml @@ -0,0 +1,48 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + + tasks: + - include_vars: vars.yml + - name: SETUP | Create storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + account_type: Standard_LRS + + - name: SETUP | Create availability set + azure_rm_availabilityset: + name: "{{ availability_set }}" + resource_group: "{{ resource_group }}" + + - name: SETUP | Create virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + address_prefixes: "{{ network }}" + + - name: SETUP | Create subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + name: "{{ subnet_name }}" + address_prefix: "{{ subnet }}" + virtual_network: "{{ network_name }}" + + - name: Create minimal VM with defaults + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + admin_username: testuser + ssh_password_enabled: false + ssh_public_keys: + - path: /home/testuser/.ssh/authorized_keys + key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com" + vm_size: Standard_B1ms + virtual_network: "{{ network_name }}" + image: + offer: UbuntuServer + publisher: Canonical + sku: 16.04-LTS + version: latest + register: vm_output diff --git 
a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml new file mode 100644 index 000000000..3a1adfdd4 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml @@ -0,0 +1,40 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + + tasks: + - include_vars: vars.yml + + - name: Delete VM + azure_rm_virtualmachine: + resource_group: "{{ resource_group }}" + name: "{{ vm_name }}" + remove_on_absent: all_autocreated + state: absent + + - name: Destroy subnet + azure_rm_subnet: + resource_group: "{{ resource_group }}" + virtual_network: "{{ network_name }}" + name: "{{ subnet_name }}" + state: absent + + - name: Destroy virtual network + azure_rm_virtualnetwork: + resource_group: "{{ resource_group }}" + name: "{{ network_name }}" + state: absent + + - name: Destroy availability set + azure_rm_availabilityset: + resource_group: "{{ resource_group }}" + name: "{{ availability_set }}" + state: absent + + - name: Destroy storage account + azure_rm_storageaccount: + resource_group: "{{ resource_group }}" + name: "{{ storage_account }}" + force_delete_nonempty: yes + state: absent \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory.yml new file mode 100644 index 000000000..fd39a273c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory.yml @@ -0,0 +1,16 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - include_vars: vars.yml + + - name: Pause for 10 mimutes for updating + shell: sleep 600 + + - meta: refresh_inventory + + - name: Test 
Inventory + assert: + that: + - vm_name in hostvars diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml new file mode 100644 index 000000000..dc6bbe080 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml @@ -0,0 +1,14 @@ +--- +uid: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(18, True, '') }}" +uid_short: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(10, True, '') }}" + +storage_account: "{{ 'stor' ~ uid }}" +availability_set: "{{ 'avbs' ~ uid_short }}" +vm_name: "{{ 'vm' ~ uid_short }}" +network_name: "{{ 'vnet' ~ uid_short }}" +subnet_name: "{{ 'snet' ~ uid_short }}" +security_group: "{{ 'sg' ~ uid_short }}" +public_ip_name: "{{ 'ip' ~ uid_short }}" +interface_name: "{{ 'int' ~ uid_short }}" +network: 10.42.0.0/24 +subnet: 10.42.0.0/28 diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh new file mode 100644 index 000000000..6f381c0a5 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -eux + +# make sure inventory is empty at the beginning of the tests +ansible-playbook playbooks/empty_inventory_config.yml "$@" + +# create vm +ansible-playbook playbooks/setup.yml "$@" + +export ANSIBLE_INVENTORY=test.azure_rm.yml + +# using fully qualified name +ansible-playbook playbooks/create_inventory_config.yml "$@" +ansible-playbook playbooks/test_inventory.yml "$@" + +# using short name +ansible-playbook playbooks/empty_inventory_config.yml "$@" +ansible-playbook playbooks/create_inventory_config.yml "$@" --extra-vars "template=basic2.yml" +ansible-playbook 
playbooks/test_inventory.yml "$@" + + +# teardown +ansible-playbook playbooks/teardown.yml "$@" diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic.yml new file mode 100644 index 000000000..636049d63 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic.yml @@ -0,0 +1,3 @@ +--- +plugin: azure.azcollection.azure_rm +plain_host_names: yes \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic2.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic2.yml new file mode 100644 index 000000000..89ec2e131 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/basic2.yml @@ -0,0 +1,3 @@ +--- +plugin: azure_rm +plain_host_names: yes \ No newline at end of file diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/test.azure_rm.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/test.azure_rm.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/setup_azure/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/setup_azure/aliases new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/azure/azcollection/tests/sanity/ignore-2.10.txt b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.10.txt new file mode 100644 index 000000000..bde4b19b6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.10.txt @@ -0,0 +1,270 @@ +plugins/modules/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_aks.py 
validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aks.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_aks.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_aks.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_apimanagement.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_appgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication_info.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_datalakestore.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment.py validate-modules:return-syntax-error +plugins/modules/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_dnsrecordset.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_gallery.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimage.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_galleryimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimageversion.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_iothubconsumergroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvault.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvaultkey_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_managementgroup.py validate-modules:invalid-ansiblemodule-schema 
+plugins/modules/azure_rm_networkinterface.py validate-modules:doc-missing-type +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_publicipaddress.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_registrationdefinition.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_registrationassignment.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_securitygroup.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:mutually_exclusive-unknown +plugins/modules/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-missing-type +plugins/modules/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescaleset.py 
validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webappslot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_autoscale.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlab.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlab_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabarmtemplate_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifact_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifactsource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabartifactsource_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabenvironment.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabenvironment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabpolicy_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter 
+plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabvirtualmachine_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_route.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:invalid-documentation +plugins/modules/azure_rm_trafficmanager.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_trafficmanagerendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec 
+plugins/modules/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-missing-type +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_vpnsite.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_batchaccount.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule_info.py validate-modules:return-syntax-error 
+plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_lock_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_azurefirewall.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_adpassword.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adpassword.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_adpassword_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword_info.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adserviceprincipal.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_apimanagementservice.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_vmbackuppolicy.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_privatednsrecordset.py validate-modules:invalid-ansiblemodule-schema +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang diff --git 
a/ansible_collections/azure/azcollection/tests/sanity/ignore-2.11.txt b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.11.txt new file mode 100644 index 000000000..bde4b19b6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.11.txt @@ -0,0 +1,270 @@ +plugins/modules/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aks.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_aks.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_aks.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_apimanagement.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_appgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py 
validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_datalakestore.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment.py validate-modules:return-syntax-error +plugins/modules/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_dnsrecordset.py validate-modules:invalid-ansiblemodule-schema 
+plugins/modules/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_gallery.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimage.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_galleryimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimageversion.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_iothubconsumergroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvault.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_keyvaultkey_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_managementgroup.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-missing-type +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_publicipaddress.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_registrationdefinition.py validate-modules:invalid-ansiblemodule-schema 
+plugins/modules/azure_rm_registrationassignment.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_securitygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:mutually_exclusive-unknown +plugins/modules/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-missing-type +plugins/modules/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-list-no-elements 
+plugins/modules/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webappslot.py 
validate-modules:doc-required-mismatch +plugins/modules/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_autoscale.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlab.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlab_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabarmtemplate_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifact_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifactsource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabartifactsource_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabenvironment.py validate-modules:parameter-list-no-elements 
+plugins/modules/azure_rm_devtestlabenvironment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabpolicy_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabvirtualmachine_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbserver.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_route.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_servicebus_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:invalid-documentation +plugins/modules/azure_rm_trafficmanager.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_trafficmanagerendpoint.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-missing-type +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_vpnsite.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_batchaccount.py validate-modules:invalid-ansiblemodule-schema 
+plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_lock_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_azurefirewall.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_adpassword.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adpassword.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_adpassword_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword_info.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adserviceprincipal.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_apimanagementservice.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py 
validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_vmbackuppolicy.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_privatednsrecordset.py validate-modules:invalid-ansiblemodule-schema +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang diff --git a/ansible_collections/azure/azcollection/tests/sanity/ignore-2.12.txt b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.12.txt new file mode 100644 index 000000000..bde4b19b6 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.12.txt @@ -0,0 +1,270 @@ +plugins/modules/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aks.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_aks.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_aks.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_apimanagement.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_appgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook_info.py 
validate-modules:undocumented-parameter +plugins/modules/azure_rm_datalakestore.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment.py validate-modules:return-syntax-error +plugins/modules/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_dnsrecordset.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_gallery.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimage.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_galleryimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimageversion.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimageversion.py 
validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_iothubconsumergroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvault.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvaultkey_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_managementgroup.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-missing-type +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_publicipaddress.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_registrationdefinition.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_registrationassignment.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_securitygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:mutually_exclusive-unknown +plugins/modules/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-missing-type +plugins/modules/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-list-no-elements 
+plugins/modules/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webappslot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_autoscale.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlab.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlab_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabarmtemplate_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifact_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifactsource.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabartifactsource_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabenvironment.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabenvironment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabpolicy_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabvirtualmachine_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_route.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-choices-do-not-match-spec 
+plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:invalid-documentation +plugins/modules/azure_rm_trafficmanager.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_trafficmanagerendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py 
validate-modules:doc-missing-type +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_vpnsite.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_batchaccount.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_lock_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_azurefirewall.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_adpassword.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adpassword.py 
validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_adpassword_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword_info.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adserviceprincipal.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_apimanagementservice.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_vmbackuppolicy.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_privatednsrecordset.py validate-modules:invalid-ansiblemodule-schema +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang diff --git a/ansible_collections/azure/azcollection/tests/sanity/ignore-2.13.txt b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.13.txt new file mode 100644 index 000000000..8d42b0a2c --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.13.txt @@ -0,0 +1,269 @@ +plugins/modules/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aks.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_aks.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_aks.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_apimanagement.py validate-modules:invalid-ansiblemodule-schema 
+plugins/modules/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_appgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py 
validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_datalakestore.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment.py validate-modules:return-syntax-error +plugins/modules/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_dnsrecordset.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_gallery.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimage.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_galleryimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec 
+plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimageversion.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_iothubconsumergroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvault.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvaultkey_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_managementgroup.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-missing-type +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_publicipaddress.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_registrationdefinition.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_registrationassignment.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_securitygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:mutually_exclusive-unknown +plugins/modules/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-missing-type +plugins/modules/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py 
validate-modules:doc-required-mismatch +plugins/modules/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webappslot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_autoscale.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlab.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlab_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabarmtemplate_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifact_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifactsource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabartifactsource_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabenvironment.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabenvironment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabpolicy_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabvirtualmachine_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlconfiguration_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec 
+plugins/modules/azure_rm_rediscache.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_route.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:invalid-documentation +plugins/modules/azure_rm_trafficmanager.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_trafficmanagerendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-missing-type +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_vpnsite.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_batchaccount.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_lock_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_azurefirewall.py 
validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_adpassword.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adpassword.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_adpassword_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword_info.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adserviceprincipal.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_apimanagementservice.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_vmbackuppolicy.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_privatednsrecordset.py validate-modules:invalid-ansiblemodule-schema +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang diff --git a/ansible_collections/azure/azcollection/tests/sanity/ignore-2.14.txt b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.14.txt new file mode 100644 index 000000000..20be76d1f --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.14.txt @@ -0,0 +1,266 @@ +plugins/modules/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aks.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_aks.py validate-modules:doc-elements-mismatch 
+plugins/modules/azure_rm_aks.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_apimanagement.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_appgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:undocumented-parameter 
+plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_datalakestore.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment.py validate-modules:return-syntax-error +plugins/modules/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_dnsrecordset.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_gallery.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_galleryimage.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimage.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_galleryimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_galleryimageversion.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_iothubconsumergroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvault.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvaultkey_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_managementgroup.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-missing-type +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_publicipaddress.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_publicipaddress.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_registrationdefinition.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_registrationassignment.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_securitygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_securitygroup.py 
validate-modules:mutually_exclusive-unknown +plugins/modules/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-missing-type +plugins/modules/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py 
validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webapp.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_webappslot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_autoscale.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_cdnendpoint.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py 
validate-modules:undocumented-parameter +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlab.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlab_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabarmtemplate_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifact_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabartifactsource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabartifactsource_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_devtestlabcustomimage_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabenvironment.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabenvironment_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabpolicy_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_devtestlabvirtualmachine_info.py validate-modules:return-syntax-error 
+plugins/modules/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec 
+plugins/modules/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_route.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_snapshot.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:invalid-documentation +plugins/modules/azure_rm_trafficmanager.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_trafficmanagerendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:doc-required-mismatch 
+plugins/modules/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-missing-type +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_vpnsite.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_batchaccount.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_lock_info.py validate-modules:doc-required-mismatch +plugins/modules/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_azurefirewall.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_azurefirewall.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_azurefirewall.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_adpassword.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adpassword.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_adpassword_info.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword_info.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adserviceprincipal.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_apimanagementservice.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:invalid-ansiblemodule-schema +plugins/modules/azure_rm_openshiftmanagedcluster.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_subnet.py validate-modules:doc-elements-mismatch +plugins/modules/azure_rm_vmbackuppolicy.py validate-modules:parameter-list-no-elements +plugins/modules/azure_rm_privatednsrecordset.py validate-modules:invalid-ansiblemodule-schema +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang diff --git a/ansible_collections/azure/azcollection/tests/sanity/ignore-2.9.txt b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.9.txt new file mode 100644 index 000000000..3947cceb3 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/sanity/ignore-2.9.txt @@ -0,0 +1,165 @@ +plugins/modules/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_aks.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aks.py 
validate-modules:undocumented-parameter +plugins/modules/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistryreplication_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook.py validate-modules:undocumented-parameter 
+plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_containerregistrywebhook_info.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_datalakestore.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface.py validate-modules:doc-missing-type +plugins/modules/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_publicipaddress.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_securitygroup.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_securitygroup.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_securitygroup.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount.py validate-modules:doc-missing-type +plugins/modules/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualwan.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_autoscale.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_devtestlab.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabartifactsource.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mariadbserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec +plugins/modules/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_route.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc 
+plugins/modules/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_servicebussaspolicy.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanager.py validate-modules:invalid-documentation +plugins/modules/azure_rm_trafficmanager.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerendpoint.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec 
+plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec +plugins/modules/azure_rm_virtualnetworkgateway.py validate-modules:doc-missing-type +plugins/modules/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs +plugins/modules/azure_rm_azurefirewall.py 
validate-modules:parameter-type-not-in-doc +plugins/modules/azure_rm_azurefirewall.py validate-modules:undocumented-parameter +plugins/modules/azure_rm_adpassword.py validate-modules:nonexistent-parameter-documented +plugins/modules/azure_rm_adpassword.py validate-modules:return-syntax-error +plugins/modules/azure_rm_adpassword.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adpassword_info.py validate-modules:import-before-documentation +plugins/modules/azure_rm_adserviceprincipal.py validate-modules:nonexistent-parameter-documented +tests/utils/shippable/check_matrix.py replace-urlopen +tests/utils/shippable/timing.py shebang diff --git a/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh b/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh new file mode 100644 index 000000000..b313208ce --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +group="${args[0]}" + +command -v python +python -V +if [ "$2" = "2.7" ] +then + echo "The specified environment is Python2.7" +else + alias pip='pip3' + sudo apt update + sudo apt install software-properties-common + sudo add-apt-repository ppa:deadsnakes/ppa + sudo apt install python"$2" -y + sudo apt install python3-dateutil + sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python"$2" 1 + + if [ "$2" = "3.10" ] + then + sudo apt-get install python3.10-distutils + fi + + if [ "$2" = "3.11" ] + then + sudo apt-get install python3.11-distutils + fi +fi + +command -v pip +pip --version +pip list --disable-pip-version-check + +export PATH="${PWD}/bin:${PATH}" +export PYTHONIOENCODING="UTF-8" +export LC_ALL="en_US.utf-8" + +pip install virtualenv +virtualenv --python /usr/bin/python"$2" ~/ansible-venv + +set +ux +. 
~/ansible-venv/bin/activate +set -ux + +git clone https://github.com/ansible/ansible.git +cd "ansible" +if [ "$3" = "devel" ] +then + echo "The branch is devel" +else + git checkout "stable-$3" +fi +source hacking/env-setup +pip install paramiko PyYAML Jinja2 httplib2 six + +TEST_DIR="${HOME}/.ansible/ansible_collections/azure/azcollection" +mkdir -p "${TEST_DIR}" +cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}" +cd "${TEST_DIR}" +mkdir -p shippable/testresults + +pip install -I -r "${TEST_DIR}/requirements-azure.txt" +pip install -I -r "${TEST_DIR}/sanity-requirements-azure.txt" + +timeout=90 + +if [ "$4" = "all" ] +then + echo "All module need test" +else + path_dir="${TEST_DIR}/tests/integration/targets/" + for item in "$path_dir"* + do + if [ "${item}" = "$path_dir""$4" ] + then + echo "PASS" + else + echo " " >> "${item}"/aliases + echo "disabled" >> "${item}"/aliases + fi + done +fi +echo '--------------------------------------------' +pip list +ansible --version +echo '--------------------------------------------' + +ansible-test env --dump --show --timeout "${timeout}" --color -v + +cat <> "${TEST_DIR}"/tests/integration/cloud-config-azure.ini +[default] +AZURE_CLIENT_ID:${AZURE_CLIENT_ID} +AZURE_SECRET:${AZURE_SECRET} +AZURE_SUBSCRIPTION_ID:${AZURE_SUBSCRIPTION_ID} +AZURE_SUBSCRIPTION_SEC_ID:${AZURE_SUBSCRIPTION_SEC_ID} +AZURE_TENANT:${AZURE_TENANT} +RESOURCE_GROUP:${RESOURCE_GROUP} +RESOURCE_GROUP_SECONDARY:${RESOURCE_GROUP_SECONDARY} +RESOURCE_GROUP_DATALAKE:${RESOURCE_GROUP_DATALAKE} +AZURE_PRINCIPAL_ID:${AZURE_PRINCIPAL_ID} +AZURE_MANAGED_BY_TENANT_ID:${AZURE_MANAGED_BY_TENANT_ID} +AZURE_ROLE_DEFINITION_ID:${AZURE_ROLE_DEFINITION_ID} +EOF + +if [ "sanity" = "${group}" ] +then + ansible-test sanity --color -v --junit +else + ansible-test integration --color -v --retry-on-error "shippable/azure/group${group}/" --allow-destructive +fi diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/azure.sh 
b/ansible_collections/azure/azcollection/tests/utils/shippable/azure.sh new file mode 100644 index 000000000..da037e09e --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/utils/shippable/azure.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -o pipefail -eux + +declare -a args +IFS='/:' read -ra args <<< "$1" + +cloud="${args[0]}" +python="${args[1]}" +group="${args[2]}" + +target="shippable/${cloud}/group${group}/" + +stage="${S:-prod}" + +changed_all_target="shippable/${cloud}/smoketest/" + +if ! ansible-test integration "${changed_all_target}" --list-targets > /dev/null 2>&1; then + # no smoketest tests are available for this cloud + changed_all_target="none" +fi + +if [ "${group}" == "1" ]; then + # only run smoketest tests for group1 + changed_all_mode="include" +else + # smoketest tests already covered by group1 + changed_all_mode="exclude" +fi + +# shellcheck disable=SC2086 +ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \ + --remote-terminate always --remote-stage "${stage}" \ + --docker --python "${python}" --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}" diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/check_matrix.py b/ansible_collections/azure/azcollection/tests/utils/shippable/check_matrix.py new file mode 100644 index 000000000..96a377589 --- /dev/null +++ b/ansible_collections/azure/azcollection/tests/utils/shippable/check_matrix.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file.""" +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import datetime +import json +import os +import re +import sys +import time +from ansible.module_utils.urls import open_url as urlopen + +try: + from typing import NoReturn +except ImportError: + NoReturn = None + + 
+def main():  # type: () -> None
+    """Main entry point.
+
+    Compares the T= matrix entries declared in shippable.yml against the jobs
+    the Shippable API reports for the current run, and fails the build when
+    they do not match (e.g. a partial rebuild of the matrix).
+    """
+    repo_full_name = os.environ['REPO_FULL_NAME']
+    required_repo_full_name = 'ansible-collections/azure'
+
+    if repo_full_name != required_repo_full_name:
+        # Forks/mirrors share this CI config; only enforce the matrix check
+        # on the canonical repository.
+        sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name))
+        return
+
+    with open('shippable.yml', 'rb') as yaml_file:
+        yaml = yaml_file.read().decode('utf-8').splitlines()
+
+    # Scrape "- env: T=..." entries line-by-line instead of using a YAML
+    # parser so no extra dependency is needed; "none" entries are filler.
+    defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none']
+
+    if not defined_matrix:
+        fail('No matrix entries found in the "shippable.yml" file.',
+             'Did you modify the "shippable.yml" file?')
+
+    run_id = os.environ['SHIPPABLE_BUILD_ID']
+    sleep = 1
+    jobs = []
+
+    # Query the Shippable API with exponential backoff: up to 5 attempts,
+    # sleeping 1, 2, 4, 8 seconds between retries.
+    for attempts_remaining in range(4, -1, -1):
+        try:
+            jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read())
+
+            if not isinstance(jobs, list):
+                raise Exception('Shippable run %s data is not a list.' % run_id)
+
+            break
+        except Exception as ex:
+            if not attempts_remaining:
+                fail('Unable to retrieve Shippable run %s matrix.' % run_id,
+                     str(ex))
+
+            sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex))
+            sys.stderr.write('Trying again in %d seconds...\n' % sleep)
+            time.sleep(sleep)
+            sleep *= 2
+
+    if len(jobs) != len(defined_matrix):
+        if len(jobs) == 1:
+            # A single job is the signature of "Rebuild with SSH", which
+            # re-runs one job instead of the whole matrix.
+            hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.'
+        else:
+            hint = ''
+
+        fail('Shippable run %s has %d jobs instead of the expected %d jobs.'
+             % (run_id, len(jobs), len(defined_matrix)),
+             'Try re-running the entire matrix.%s' % hint)
+
+    # Map job number -> its T= env value as reported by the API.
+    actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs)
+    # Job numbers are 1-based; compare each declared entry with the live one.
+    errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test]
+
+    # NOTE(review): `if errors:` would be the idiomatic form here.
+    if len(errors):
+        error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors)
+
+        fail('Shippable run %s has a job matrix mismatch.' % run_id,
+             'Try re-running the entire matrix.\n\n%s' % error_summary)
+
+
+def fail(message, output):  # type: (str, str) -> NoReturn
+    """Write a synthetic junit failure result plus stderr output, then exit 1."""
+    # Include a leading newline to improve readability on Shippable "Tests" tab.
+    # Without this, the first line becomes indented.
+    output = '\n' + output.strip()
+
+    # NOTE(review): datetime.utcnow() is deprecated on modern Python, but fine
+    # for the Python 2/3 range this script targets.
+    timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+    # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers
+    # NOTE(review): the markup tags of this template appear to have been lost
+    # during extraction of this patch (only placeholders and tabs remain);
+    # upstream emits a testsuites/testsuite/testcase junit document here —
+    # verify against the original file before relying on this literal.
+    xml = '''
+
+
+\t
+\t\t
+\t\t\t%s
+\t\t
+\t
+
+''' % (timestamp, message, output)
+
+    path = 'shippable/testresults/check-matrix.xml'
+    dir_path = os.path.dirname(path)
+
+    if not os.path.exists(dir_path):
+        os.makedirs(dir_path)
+
+    with open(path, 'w') as junit_fd:
+        junit_fd.write(xml.lstrip())
+
+    sys.stderr.write(message + '\n')
+    sys.stderr.write(output + '\n')
+
+    sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/cloud.sh b/ansible_collections/azure/azcollection/tests/utils/shippable/cloud.sh
new file mode 100644
index 000000000..da037e09e
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/utils/shippable/cloud.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Generic Shippable CI runner for cloud integration tests; byte-identical to
+# azure.sh (same blob hash da037e09e). Invoked as: cloud.sh <cloud>/<python>/<group>.
+
+set -o pipefail -eux
+
+# Split the single positional test descriptor on '/' or ':' into its parts.
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+cloud="${args[0]}"
+python="${args[1]}"
+group="${args[2]}"
+
+# ansible-test target alias selecting this cloud/group's integration tests.
+target="shippable/${cloud}/group${group}/"
+
+# Remote provisioning stage; defaults to "prod" unless S is exported.
+stage="${S:-prod}"
+
+changed_all_target="shippable/${cloud}/smoketest/"
+
+if ! ansible-test integration "${changed_all_target}" --list-targets > /dev/null 2>&1; then
+    # no smoketest tests are available for this cloud
+    changed_all_target="none"
+fi
+
+if [ "${group}" == "1" ]; then
+    # only run smoketest tests for group1
+    changed_all_mode="include"
+else
+    # smoketest tests already covered by group1
+    changed_all_mode="exclude"
+fi
+
+# COVERAGE/CHANGED/UNSTABLE are optional flags exported by shippable.sh; word
+# splitting is intentional (SC2086 suppressed).
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+    --remote-terminate always --remote-stage "${stage}" \
+    --docker --python "${python}" --changed-all-target "${changed_all_target}" --changed-all-mode "${changed_all_mode}"
diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/sanity.sh b/ansible_collections/azure/azcollection/tests/utils/shippable/sanity.sh
new file mode 100644
index 000000000..0c54a42c8
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/utils/shippable/sanity.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# Run ansible-test sanity checks inside docker for Shippable CI.
+
+set -o pipefail -eux
+
+# NOTE(review): the command below ends with a dangling "\" continuation at
+# end-of-file; harmless today, but fragile if lines are ever appended.
+# shellcheck disable=SC2086
+ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+    --docker \
diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/shippable.sh b/ansible_collections/azure/azcollection/tests/utils/shippable/shippable.sh
new file mode 100644
index 000000000..b67235789
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/utils/shippable/shippable.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+# Top-level Shippable CI driver: cleans the build host, installs a pinned
+# ansible into a virtualenv, copies the collection to its canonical path,
+# verifies the job matrix, then dispatches to tests/utils/shippable/<script>.sh.
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+# First path component selects the per-area runner (e.g. "sanity", "azure").
+script="${args[0]}"
+
+# The full, unsplit test descriptor is forwarded to the runner.
+test="$1"
+
+docker images ansible/ansible
+docker images quay.io/ansible/*
+docker ps
+
+# Remove leftover containers (keeping Shippable's own drydock images),
+# ignoring failures for containers that are already gone.
+for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v '^drydock/' | sed 's/^.* //'); do
+    docker rm -f "${container}" || true  # ignore errors
+done
+
+docker ps
+
+if [ -d /home/shippable/cache/ ]; then
+    ls -la /home/shippable/cache/
+fi
+
+# Log the interpreter/pip state for debugging CI image drift.
+command -v python
+python -V
+
+command -v pip
+pip --version
+pip list --disable-pip-version-check
+
+export PATH="${PWD}/bin:${PATH}"
+export PYTHONIOENCODING='utf-8'
+
+# Nightly builds always run the full matrix with coverage enabled.
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then
+    COVERAGE=yes
+    COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+    # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+    export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then
+    # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+    export COVERAGE="--coverage"
+else
+    # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+    export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+    # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+    export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+    # disable change detection triggered by having 'ci_complete' in the latest commit message
+    export CHANGED=""
+else
+    # enable change detection (default behavior)
+    export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+    # run unstable tests which are targeted by focused changes on PRs
+    export UNSTABLE="--allow-unstable-changed"
+else
+    # do not run unstable tests outside PRs
+    export UNSTABLE=""
+fi
+
+# Isolated virtualenv with a pinned ansible release; xtrace/nounset are
+# suspended while sourcing activate, which is not `set -u` clean.
+virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+set +ux
+. ~/ansible-venv/bin/activate
+set -ux
+
+pip install ansible==2.9.0 --disable-pip-version-check
+
+# ansible-test expects the collection at its canonical path under ~/.ansible.
+TEST_DIR="${HOME}/.ansible/ansible_collections/azure/azcollection"
+mkdir -p "${TEST_DIR}"
+cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
+cd "${TEST_DIR}"
+
+# Publish coverage/junit/bot artifacts to the directories Shippable collects,
+# regardless of whether the test run succeeds or fails (installed via trap).
+function cleanup
+{
+    if [ -d tests/output/coverage/ ]; then
+        if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
+            # for complete on-demand coverage generate a report for all files with no coverage on the "other" job so we only have one copy
+            if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/1" ]; then
+                stub="--stub"
+            else
+                stub=""
+            fi
+
+            # shellcheck disable=SC2086
+            ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
+            cp -a tests/output/reports/coverage=*.xml shippable/codecoverage/
+        fi
+    fi
+
+    if [ -d tests/output/junit/ ]; then
+        cp -aT tests/output/junit/ shippable/testresults/
+    fi
+
+    if [ -d tests/output/data/ ]; then
+        cp -a tests/output/data/ shippable/testresults/
+    fi
+
+    if [ -d tests/output/bot/ ]; then
+        cp -aT tests/output/bot/ shippable/testresults/
+    fi
+}
+
+trap cleanup EXIT
+
+# Coverage runs need more provisioning headroom than plain runs.
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+    timeout=60
+else
+    timeout=45
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+# Fail fast if the live job matrix does not match shippable.yml, then hand
+# off to the selected per-area runner script.
+"tests/utils/shippable/check_matrix.py"
+"tests/utils/shippable/${script}.sh" "${test}"
diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/timing.py b/ansible_collections/azure/azcollection/tests/utils/shippable/timing.py
new file mode 100644
index 000000000..d9456855d
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/utils/shippable/timing.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3.6
+"""Prefix each line read from stdin with the elapsed MM:SS since startup."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+start = time.time()
+
+# Pass undecodable bytes through unchanged instead of raising.
+sys.stdin.reconfigure(errors='surrogateescape')
+sys.stdout.reconfigure(errors='surrogateescape')
+
+for line in sys.stdin:
+    # Elapsed time since startup, rendered as MM:SS before the original line.
+    seconds = time.time() - start
+    sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+    # Flush per line so timestamps appear live in the CI console.
+    sys.stdout.flush()
diff --git a/ansible_collections/azure/azcollection/tests/utils/shippable/timing.sh b/ansible_collections/azure/azcollection/tests/utils/shippable/timing.sh
new file mode 100644
index 000000000..77e257830
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/utils/shippable/timing.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+# Run the given command, timestamping its combined stdout/stderr via timing.py.
+
+set -o pipefail -eu
+
+"$@" 2>&1 | "$(dirname "$0")/timing.py"
-- 
cgit v1.2.3