summaryrefslogtreecommitdiffstats
path: root/ansible_collections/azure
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-26 06:22:16 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-06-26 06:22:16 +0000
commit0c70278249c356e314434985d3a92b4105c88872 (patch)
treef7e16f44fb8fc7a54f84d9977711c87d9e435666 /ansible_collections/azure
parentAdding debian version 10.0.1+dfsg-1. (diff)
downloadansible-0c70278249c356e314434985d3a92b4105c88872.tar.xz
ansible-0c70278249c356e314434985d3a92b4105c88872.zip
Merging upstream version 10.1.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/azure')
-rw-r--r--ansible_collections/azure/azcollection/CHANGELOG.md65
-rw-r--r--ansible_collections/azure/azcollection/CONTRIBUTING.md6
-rw-r--r--ansible_collections/azure/azcollection/FILES.json493
-rw-r--r--ansible_collections/azure/azcollection/MANIFEST.json4
-rw-r--r--ansible_collections/azure/azcollection/README.md2
-rw-r--r--ansible_collections/azure/azcollection/meta/execution-environment.yml2
-rw-r--r--ansible_collections/azure/azcollection/meta/runtime.yml21
-rw-r--r--ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py2
-rw-r--r--ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py3
-rw-r--r--ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py24
-rw-r--r--ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py31
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py2
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py326
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py44
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py92
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py46
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py78
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py33
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py103
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py656
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py131
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py10
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py18
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py807
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py468
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py208
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py427
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py267
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py64
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py2
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py102
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py29
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py4
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py392
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py305
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py277
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py228
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py244
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py206
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py672
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py245
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py89
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py43
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py52
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py354
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py236
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py311
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py309
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py25
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py141
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py52
-rw-r--r--ansible_collections/azure/azcollection/pr-pipelines.yml7
-rw-r--r--ansible_collections/azure/azcollection/requirements.txt (renamed from ansible_collections/azure/azcollection/requirements-azure.txt)7
-rw-r--r--ansible_collections/azure/azcollection/sanity-requirements.txt (renamed from ansible_collections/azure/azcollection/sanity-requirements-azure.txt)0
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/requirements.txt51
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml12
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml84
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml65
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml142
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml111
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml4
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml28
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml199
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml4
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml18
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml5
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml138
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/aliases3
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/meta/main.yml (renamed from ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml)0
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/tasks/main.yml114
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases1
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases1
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml46
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlflexibleserver/tasks/main.yml156
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml1
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/aliases (renamed from ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases)2
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/meta/main.yml2
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/tasks/main.yml264
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/aliases3
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/meta/main.yml2
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/tasks/main.yml242
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml19
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml4
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml9
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_skus.yml136
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml5
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml12
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/aliases3
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/meta/main.yml2
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/tasks/main.yml131
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml68
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml44
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml21
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml7
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory_filter.yml21
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml1
-rwxr-xr-xansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh5
-rw-r--r--ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/filter.yml14
-rwxr-xr-xansible_collections/azure/azcollection/tests/utils/ado/ado.sh4
99 files changed, 8609 insertions, 2360 deletions
diff --git a/ansible_collections/azure/azcollection/CHANGELOG.md b/ansible_collections/azure/azcollection/CHANGELOG.md
index f271fc377..27c5e9889 100644
--- a/ansible_collections/azure/azcollection/CHANGELOG.md
+++ b/ansible_collections/azure/azcollection/CHANGELOG.md
@@ -1,5 +1,70 @@
# Change Log
+## v2.4.0 (2024-05-30)
+
+### NEW MODULES
+ - azure_rm_storageaccountmanagementpolicy: Add support for manage storage account management policy ([#1536](https://github.com/ansible-collections/azure/pull/1536))
+ - azure_rm_storageaccountmanagementpolicy_info: Add support for manage storage account management policy ([#1536](https://github.com/ansible-collections/azure/pull/1536))
+ - azure_rm_virtualnetworkgatewaynatrule: Add support for managed virtual network gateway nat rule ([#1525](https://github.com/ansible-collections/azure/pull/1525))
+ - azure_rm_virtualnetworkgatewaynatrule_info: Add support for virtual network gateway nat rule ([#1525](https://github.com/ansible-collections/azure/pull/1525))
+ - azure_rm_localnetworkgateway: Add support for mange local network gateway ([#1523](https://github.com/ansible-collections/azure/pull/1523))
+ - azure_rm_localnetworkgateway_info: Add fetch for mange local network gateway facts ([#1523](https://github.com/ansible-collections/azure/pull/1523))
+ - azure_rm_sqlmidatabase: Add support for managed SQL managed database ([#1548](https://github.com/ansible-collections/azure/pull/1548))
+ - azure_rm_sqlmidatabase_info: Add support for fetch the managed SQL managed database ([#1548](https://github.com/ansible-collections/azure/pull/1548))
+ - azure_rm_sqlmidblongtermretentionpolicy: Add support for managed SQL managed database long term retention policy ([#1548](https://github.com/ansible-collections/azure/pull/1548))
+ - azure_rm_sqlmidblongtermretentionpolicy_info: Add support for fetch managed SQL managed database long term retention policy ([#1548](https://github.com/ansible-collections/azure/pull/1548))
+ - azure_rm_sqlmidbshorttermretentionpolicy: Add support for fetch managed SQL managed database short term retention policy ([#1548](https://github.com/ansible-collections/azure/pull/1548))
+ - azure_rm_sqlmidbshorttermretentionpolicy_info: Add support for fetch managed SQL managed database short term retention policy ([#1548](https://github.com/ansible-collections/azure/pull/1548))
+ - azure_rm_vmsku_info: Add support for list VM SKUs ([#1546](https://github.com/ansible-collections/azure/pull/1546))
+ - tests/integration/requirements.txt: Symlink requirements-azure.txt from tests/integration ([#1551](https://github.com/ansible-collections/azure/pull/1551))
+
+### FEATURE ENHANCEMENT
+ - azure_rm_aduser: Add support for `on_premises_extension_attributes` ([#1518](https://github.com/ansible-collections/azure/pull/1518))
+ - azure_rm_aduser_info: Add support for `on_premises_extension_attributes` ([#1518](https://github.com/ansible-collections/azure/pull/1518))
+ - azure_keyvault_secret: Add support for `cloud_type` ([#1517](https://github.com/ansible-collections/azure/pull/1517))
+ - azure_rm_postgresqlflexibleserver: Add support for `identity` ([#1528](https://github.com/ansible-collections/azure/pull/1528))
+ - azure_rm_postgresqlflexibleserver_info: Add support for `identity` ([#1528](https://github.com/ansible-collections/azure/pull/1528))
+ - plugins/inventory/azure_rm.py: Expand Inventory filter integration tests ([#1547](https://github.com/ansible-collections/azure/pull/1547))
+ - azure_rm_webapp: Add support for `site_auth_settings` ([#1538](https://github.com/ansible-collections/azure/pull/1538))
+ - azure_rm_webapp_info: Add support for `site_auth_settings` ([#1538](https://github.com/ansible-collections/azure/pull/1538))
+ - azure_rm_aks:
+ - Add support for UserAssigned Identity ([#1543](https://github.com/ansible-collections/azure/pull/1543))
+ - Add `managedNATGateway` and `userAssignedNATGateway` to `outbound_type` ([#1537](https://github.com/ansible-collections/azure/pull/1537))
+ - azure_rm_webappaccessrestriction: Add more parameters to `ip_security_restrictions` ([#1558](https://github.com/ansible-collections/azure/pull/1558))
+ - azure_rm_webappaccessrestriction_info: Add more parameters to `ip_security_restrictions` ([#1558](https://github.com/ansible-collections/azure/pull/1558))
+ - azure_rm_virtualmachine: Add support for attaching existing managed data disks at VM creation ([#1430](https://github.com/ansible-collections/azure/pull/1430))
+ - azure_rm_aksagentpool: Add support for more parameters ([#1477](https://github.com/ansible-collections/azure/pull/1477))
+ - azure_rm_aksagentpool_info: Add support for more parameters ([#1477](https://github.com/ansible-collections/azure/pull/1477))
+ - azure_rm_adgroup: Allow service principals and nested groups to be returned in membership attributes ([#1507](https://github.com/ansible-collections/azure/pull/1507))
+ - azure_rm_adgroup_info: Allow service principals and nested groups to be returned in membership attributes ([#1507](https://github.com/ansible-collections/azure/pull/1507))
+ - azure_rm_backupazurevm: No need to json serialization the response ([#1531](https://github.com/ansible-collections/azure/pull/1531))
+
+### BUG FIXING
+ - azure_rm_adapplication: Fix `optional_claims` handling ([#1480](https://github.com/ansible-collections/azure/pull/1480))
+ - azure_rm_cognitivesearch: Fix test failed ([#1520](https://github.com/ansible-collections/azure/pull/1520))
+ - azure_rm_common.py: Fix the inconsistency between custom classes and Python SDK attributes ([#1554](https://github.com/ansible-collections/azure/pull/1554))
+ - meta/runtime.yml:
+ - Keep action_groups and modules list consistent ([#1553](https://github.com/ansible-collections/azure/pull/1553))
+ - Delete the deprecate modules ([#1556](https://github.com/ansible-collections/azure/pull/1556))
+ - azure_rm_rediscache_info: Fix typo ([#1550](https://github.com/ansible-collections/azure/pull/1550))
+ - plugins/inventory/azure_rm.py: Fix inventory host processing ([#1545](https://github.com/ansible-collections/azure/pull/1545))
+ - azure_rm_accesstoken_info: Fix authorization issue ([#1541](https://github.com/ansible-collections/azure/pull/1541))
+ - azure_rm_adgroup: Support update functionality ([#1530](https://github.com/ansible-collections/azure/pull/1530))
+ - azure_rm_webapp: Delete the imported logging module ([#1567](https://github.com/ansible-collections/azure/pull/1567))
+ - azure_rm_postgresqlflexiblefirewallrule: Delete the logging module ([#1567](https://github.com/ansible-collections/azure/pull/1567))
+ - azure_rm_loadbalancer: Remove functionality which should have been removed for Ansible 2.9 ([#1508](https://github.com/ansible-collections/azure/pull/1508))
+ - azure_rm_networkinterface: Remove functionality which should have been removed for Ansible 2.9 ([#1508](https://github.com/ansible-collections/azure/pull/1508))
+ - azure_rm_localnetworkgateway: Fix documentation mistakes ([#1563](https://github.com/ansible-collections/azure/pull/1563))
+ - azure_rm_virtualmachine: Create `_own_nsg_` tag only if `created_nsg` is `true` ([#1565](https://github.com/ansible-collections/azure/pull/1565))
+ - azure_rm_storageblob: Fix authentication issue when shared keys disabled ([#1564](https://github.com/ansible-collections/azure/pull/1564))
+ - azure_rm_virtualmachinescalesetinstance_info: Fixed obtaining flexible VMSS instances failed ([#1529](https://github.com/ansible-collections/azure/pull/1529))
+
+### BREAKING CHANGE:
+ - azure_rm_datalakestore: Deprecate `azure-mgmt-datalake-store` ([#1555](https://github.com/ansible-collections/azure/pull/1555))
+ - azure_rm_datalakestore_info: Deprecate `azure_rm_datalakestore_info` ([#1555](https://github.com/ansible-collections/azure/pull/1555))
+ - requirements.txt: Rename `requirements-azure.txt` to `requirements.txt` ([#1552](https://github.com/ansible-collections/azure/pull/1552))
+ - sanity-requirements.txt: Rename `sanity-requirements-azure.txt` to `sanity-requirements.txt` ([#1552](https://github.com/ansible-collections/azure/pull/1552))
+
## v2.3.0 (2024-03-27)
### NEW MODULES
diff --git a/ansible_collections/azure/azcollection/CONTRIBUTING.md b/ansible_collections/azure/azcollection/CONTRIBUTING.md
index 8358024a3..def176635 100644
--- a/ansible_collections/azure/azcollection/CONTRIBUTING.md
+++ b/ansible_collections/azure/azcollection/CONTRIBUTING.md
@@ -17,8 +17,8 @@ When contributing to this repository, please first discuss the change you wish t
. venv/bin/activate
pip3 install -U pip
pip3 install ansible
- pip3 install -r requirements-azure.txt
- pip3 install -r sanity-requirements-azure.txt
+ pip3 install -r requirements.txt
+ pip3 install -r sanity-requirements.txt
```
## Running tests
@@ -51,7 +51,7 @@ Additional `ansible-test` resources:
1. Please provide integration tests showing the changed behavior/functionality under `tests/integration/targets/<relevant-module>/tasks`.
1. Think about updating the documentation and examples for the changed module.
-1. Please run a sanity check. Install prerequisites `pip install -r sanity-requirements-azure.txt`, run with `ansible-test sanity --color -v --junit`. Read more at https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html.
+1. Please run a sanity check. Install prerequisites `pip install -r sanity-requirements.txt`, run with `ansible-test sanity --color -v --junit`. Read more at https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html.
1. There is a script `tests/utils/ado/ado.sh` for running tests inside an Azure DevOps pipeline. Unfortunately the pipeline and results are not visible for the public. You can perhaps adapt the parts of the script or use a small playbook to run the task list of the integration tests mentioned above.
## Release Process
diff --git a/ansible_collections/azure/azcollection/FILES.json b/ansible_collections/azure/azcollection/FILES.json
index 57fe6593a..89f7b8038 100644
--- a/ansible_collections/azure/azcollection/FILES.json
+++ b/ansible_collections/azure/azcollection/FILES.json
@@ -18,14 +18,14 @@
"name": "meta/execution-environment.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4ed6d806d5b0456d5d6ab6e46d68bdbe7b46b10b4352a80ae8b8487220337742",
+ "chksum_sha256": "0e30f905b015d1d8a17d3a540cb8892b479fcbf9cb873ac2ff0665fe499f318e",
"format": 1
},
{
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d034d1f9a361fef10f399b0a9e563b6b08df2a190432b69aced0323849298fcf",
+ "chksum_sha256": "b20ca160210ef90dd028b531f408c398538edc3f739ed6acd56f679764074d60",
"format": 1
},
{
@@ -74,7 +74,7 @@
"name": "tests/utils/ado/ado.sh",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6e74c310ccc00f71866dc1cd4bc73a66de6a38b2d8dceb2aef6d8bf2a20908f9",
+ "chksum_sha256": "c7dadf203058cd24f2c40f32d0f0a876dac25fad06184ea2bc5fb6e29161d9e4",
"format": 1
},
{
@@ -270,7 +270,7 @@
"name": "tests/integration/targets/azure_rm_webapp/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9774c1a314a6b5fef489d9405550a1395b9468bbea779af6ca95ae47af6386a9",
+ "chksum_sha256": "d462724d603f647080b3432af1e17d420e449077c59df7f3f4609fc5a3b158a2",
"format": 1
},
{
@@ -718,7 +718,7 @@
"name": "tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "88959c5262b2f543793b47314b9bce24d267e32f9c3f419f71a242dbbccadf12",
+ "chksum_sha256": "ee9076297fe0a2b3cadab664bd2ab8f9e79b2a124b3a6d93f1c44a5fe2fb579a",
"format": 1
},
{
@@ -781,7 +781,7 @@
"name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fc7a86147194125bacfe94a37a05af001ff6d68b6ee5b759d6561ceea51b6b33",
+ "chksum_sha256": "1d0616c0d69966ed1c2c8be54be02fa504886f509c926b33325fe2aa2478cd60",
"format": 1
},
{
@@ -806,6 +806,13 @@
"format": 1
},
{
+ "name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_skus.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffa9c0c2af337f8cec9f7b89ac35d90dc2115029124baccbad99a0448b87228a",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_image_latest.yml",
"ftype": "file",
"chksum_type": "sha256",
@@ -865,7 +872,7 @@
"name": "tests/integration/targets/azure_rm_virtualmachine/inventory.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dbfa6b4c9d720fa3c687dfb4c1101701708593184aaf0950ea66cca1f7bf63eb",
+ "chksum_sha256": "1026ed40845d0bac9ebe24f762cb7ae7f8a4106f009d7f5dcb78012e9a2b0d04",
"format": 1
},
{
@@ -1082,14 +1089,14 @@
"name": "tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2b2be2027bb762b22ccf731a32baf3fa8b2675369c08b76c1499b3ea10902add",
+ "chksum_sha256": "498b94fcebf9ede6141bda0bbbadf14ef3eafcca168126018ca02c4a1ee7d169",
"format": 1
},
{
"name": "tests/integration/targets/azure_rm_aks/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "683619ec3541848983b2f513eae4d885c97d3040ac188d252aec7304c3440c0b",
+ "chksum_sha256": "f422e499d76bdabbe76f1b96731abe2fc0dffba34961b40bf351475dae9af0ae",
"format": 1
},
{
@@ -1383,7 +1390,7 @@
"name": "tests/integration/targets/azure_rm_eventhub/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4ef2c774f92b1d1c61c070407cb15cec4492593f03dfc095031136a6fc315e4",
+ "chksum_sha256": "3a85ebeff8f8be5b1241569a00c38d59bd340c0f6fb536d5b00fb34f4b205183",
"format": 1
},
{
@@ -1929,7 +1936,7 @@
"name": "tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9e3cb1009bcddcf4db403356c71a3862d40d9978395362480795d87961229f8d",
+ "chksum_sha256": "9e14c469074c567713a26ac54f6858c525f4fc093ee99dc47c8a3adad2317611",
"format": 1
},
{
@@ -1992,7 +1999,7 @@
"name": "tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "23178ef47e9dbb43a51984becf44d72972a399952958fd2a76d71d6338864252",
+ "chksum_sha256": "487fcfc238a725862690c3b7aa968873a2839e2e2e568e41d113d7204f62580a",
"format": 1
},
{
@@ -2034,7 +2041,7 @@
"name": "tests/integration/targets/azure_rm_mysqlserver/aliases",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2c74420c4bc9cc5d8af14406a1d97ea762c6b8afa4de8728a4a833b2e742cf41",
+ "chksum_sha256": "e8fa125d2071b693fa257c8ca5976af713c6861aabe41f1aafe898de48e0dcfc",
"format": 1
},
{
@@ -2678,7 +2685,7 @@
"name": "tests/integration/targets/azure_rm_postgresqlflexibleserver/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b6fa3f158220b7cd8d38e7a773ee15e4f9781e627df4e3a209142f6c430414f1",
+ "chksum_sha256": "1c183408812df20a1e5416736ab072410277fcec3209eb764f18245ff470ad22",
"format": 1
},
{
@@ -2790,7 +2797,7 @@
"name": "tests/integration/targets/azure_rm_mariadbserver/aliases",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0f1123b3696957f55774d4316a4461182998975a0db1f960ef289b20b5e926c7",
+ "chksum_sha256": "c2b0c67ab1a27132c26a7d0768c63194b16ef651a91408bfd1f9646a04e41584",
"format": 1
},
{
@@ -2850,6 +2857,48 @@
"format": 1
},
{
+ "name": "tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0dad8e7600bf9afdde5db7736b87f4d29bb3523fd55f72aa1b4357de196f2039",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/azure_rm_routetable",
"ftype": "dir",
"chksum_type": null,
@@ -3014,7 +3063,7 @@
"name": "tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e3712847b582b6a3a3f461055963ce51034e160b1e174d06b8b8f6eeecd4bc19",
+ "chksum_sha256": "9ea4303f86d14b8ce34ef9f49ba3c42c680f3ef236e82d177ec0ca5b1847be6e",
"format": 1
},
{
@@ -3098,7 +3147,7 @@
"name": "tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e44d0c16980fee5a6cca7dc10c26e4bb487b761eb6aa87c5cee17e527c37453d",
+ "chksum_sha256": "e20f224fa3c981a35b36a5d015c3a310e9e9140d4da727e3d5700d4d820646ce",
"format": 1
},
{
@@ -3182,7 +3231,7 @@
"name": "tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "26d7dc158898d9dbc017b66108ef335ff0b4a5ba01d2566cecbec6ad767763be",
+ "chksum_sha256": "0217aa78bbf022c890e4b3b1c4d9e979928b2d89446d236ffa49f0d85b72a214",
"format": 1
},
{
@@ -3266,7 +3315,7 @@
"name": "tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b6dc390a0291155225b4305560c255bf3a36be60542213e2ad2fcc6fb9daf94",
+ "chksum_sha256": "17c0008d7d4478bf2c46548fbc575fe09c5feea4fa1220b0c6ed81c3d9cedca4",
"format": 1
},
{
@@ -3354,6 +3403,48 @@
"format": 1
},
{
+ "name": "tests/integration/targets/azure_rm_sqlmanageddatabase",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_sqlmanageddatabase/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_sqlmanageddatabase/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_sqlmanageddatabase/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be93ff3f283965eee2a98d5254d46fc1f8643c67e5364cc32f30a6278156136c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_sqlmanageddatabase/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_sqlmanageddatabase/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "978ae8cc363ee0bc2dadb1440a0da469ebbc841ad5b0784b32de2ebc36ba9d0f",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/azure_rm_virtualmachinescaleset",
"ftype": "dir",
"chksum_type": null,
@@ -3392,7 +3483,49 @@
"name": "tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2b560766f54992fab349101fb4528f9bf12022337f573ad7f8bc3a9b80e9dff7",
+ "chksum_sha256": "947482d7d00c73ad0bdf10f3338d5d88545d8c9452d9cdcef949f6517baf2b4d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_storageaccountmanagementpolicy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_storageaccountmanagementpolicy/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_storageaccountmanagementpolicy/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_storageaccountmanagementpolicy/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_storageaccountmanagementpolicy/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_storageaccountmanagementpolicy/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66a66526c148b5f7b1199052e5b11748f809af5196473858803740fd7889826f",
"format": 1
},
{
@@ -3581,7 +3714,7 @@
"name": "tests/integration/targets/inventory_azure/playbooks/vars.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "23fff20fb969471888915a24760b7a90723a30e3e0e6b7c7745864470c898027",
+ "chksum_sha256": "14e6cd31e3c01c64380ea41da324c28fdb69013e1ab588340a52878521ee45d7",
"format": 1
},
{
@@ -3595,7 +3728,14 @@
"name": "tests/integration/targets/inventory_azure/playbooks/setup.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e95740d1317dda6c7a610a482d33b29e2035293ff74e10a2bcc0f2997ba85430",
+ "chksum_sha256": "e52019613e959bd8c5cd2b5acb0893e284c82e914a9c8a4855de39aa490848f7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_azure/playbooks/test_inventory_filter.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fe385bf9ed8dc9a1f673d8d2700d19d4719cef0b372271582a330780f9fcb4d",
"format": 1
},
{
@@ -3616,7 +3756,7 @@
"name": "tests/integration/targets/inventory_azure/playbooks/teardown.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f44c8ef223d0219855c453cd859b44b7cd4b2fa10fb1f8cacac75e020975fc8",
+ "chksum_sha256": "b814aff009dbcc170e45c7eb754e23f0f711ecfcfeee671edec78d8dcd9fb994",
"format": 1
},
{
@@ -3634,6 +3774,13 @@
"format": 1
},
{
+ "name": "tests/integration/targets/inventory_azure/templates/filter.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5ac04f954afcf425f27a4b3f04a6b251a6232a881ac6d07028027fc1d2a6b28",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/inventory_azure/templates/basic2.yml",
"ftype": "file",
"chksum_type": "sha256",
@@ -3644,7 +3791,7 @@
"name": "tests/integration/targets/inventory_azure/runme.sh",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "203fe16581a305486f7d5a2feafad324ed5515a96c26b4f4d84ab34a1cdcb6a3",
+ "chksum_sha256": "c97ddb33d20d39a8792d42d7c10bf940b595eb4c54ccc770e7637d7d7d79b1ad",
"format": 1
},
{
@@ -3742,7 +3889,7 @@
"name": "tests/integration/targets/azure_rm_image/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f15012224b91b0dcb340a2dfcd3d360a36bf2ed291b88a4206914ac5db295466",
+ "chksum_sha256": "000d058a325415d8c5273575da71d0b841945db7e8544dec89abd543cf2a7a5c",
"format": 1
},
{
@@ -4166,6 +4313,48 @@
"format": 1
},
{
+ "name": "tests/integration/targets/azure_rm_localnetworkgateway",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_localnetworkgateway/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_localnetworkgateway/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_localnetworkgateway/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "109bb8ece8ca54a061f03f7050aca3401610606b79a6e313d2556dcb04fda637",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_localnetworkgateway/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/azure_rm_localnetworkgateway/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1671301ff0e021480b49e44a541902ee03e0b19ed146feb4c65e3b72564cc925",
+ "format": 1
+ },
+ {
"name": "tests/integration/targets/azure_rm_registrationdefinition",
"ftype": "dir",
"chksum_type": null,
@@ -4372,7 +4561,7 @@
"name": "tests/integration/targets/azure_rm_aduser/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "978e84fe852bac2909bcf4d707ecd577b164e6bd25c694464403ff855562b369",
+ "chksum_sha256": "4007425fe50e73b451fb181b1dc93a4fde08d097a4e0a52198d7113ed0a8e8df",
"format": 1
},
{
@@ -4582,49 +4771,7 @@
"name": "tests/integration/targets/azure_rm_expressroute/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0fee3fb92d49fcebdc6564a4becd35f638cfa294e5d893fc5adf2fff21ac072b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/azure_rm_datalakestore",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/azure_rm_datalakestore/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/azure_rm_datalakestore/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d00bd90476595ae7d97d3ca8f8aa7ff475ebc3dd87d7d12e862bf0527f7bd81f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/azure_rm_datalakestore/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "528893af1cac01f38ab277aec0138b83c74b2576464d96eb7f3da330784edaff",
- "format": 1
- },
- {
- "name": "tests/integration/targets/azure_rm_datalakestore/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/azure_rm_datalakestore/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0731e52280287102127653617210703eeb3bd7abf3125367f33df7fd8d0be818",
+ "chksum_sha256": "454d18a3654dde5beb768bea56fdd4ef2458179bc79a48c1cb62265034158efb",
"format": 1
},
{
@@ -4708,7 +4855,7 @@
"name": "tests/integration/targets/azure_rm_networkinterface/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cd2b9fd7ff5b256d26839ba3646b9347fa95f8fd6b1104e7f2835e1d7b7b2624",
+ "chksum_sha256": "baf43905968e29eb26a5d990d8b28ad01bd872e204b0edea4cfaa07f24cd81da",
"format": 1
},
{
@@ -4750,7 +4897,7 @@
"name": "tests/integration/targets/azure_rm_adapplication/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e7b04ba312d08b8fdee715292776842a8533aed97ad6aae4c25ebcba04c8ccf2",
+ "chksum_sha256": "01dfe6079ef9bf4766404294c2f63eccea68790e9ed762ffcb0ddf8a1d4e8c55",
"format": 1
},
{
@@ -4960,7 +5107,7 @@
"name": "tests/integration/targets/azure_rm_storageblob/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7175cd6cb47363104377b380cc011fa46f815bbc69b2d81cbd347786159bda9e",
+ "chksum_sha256": "0775e6b9e57bfde86683496fba1c56817948fa396743ab2986f40001e4655694",
"format": 1
},
{
@@ -5146,31 +5293,31 @@
"format": 1
},
{
- "name": "shippable.yml",
+ "name": "tests/integration/requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "515333579eea59619360d72e38cc2c5c9a8b43ff59cd3ddcc12c5b0172553b4a",
+ "chksum_sha256": "83fee6c4cd46119ab129ecb1012323a4e3acef73fcc5c4018cbd1aa89d6dca7a",
"format": 1
},
{
- "name": "README.md",
+ "name": "shippable.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "addfda03504e77be9156f1d5a1dec8ec3d9f81d08498ac814e7f782adbdc0e21",
+ "chksum_sha256": "515333579eea59619360d72e38cc2c5c9a8b43ff59cd3ddcc12c5b0172553b4a",
"format": 1
},
{
- "name": "CHANGELOG.md",
+ "name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9968490b2917b41466fcf58ff804ea27213672db0b91ac0f55ce076246a69913",
+ "chksum_sha256": "5514773c36ef505855887df54a38b3c7d496dc6b5532f8474fbbe2160ac5f8de",
"format": 1
},
{
- "name": "requirements-azure.txt",
+ "name": "CHANGELOG.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5ed84a767e40fa7e5bdb9dccd739cb355185b1414a1beab6892a1bb8d3e6062d",
+ "chksum_sha256": "7c3f32f6956b0e39738ae3510371f7fcb977bd0665b592b83ee1b9b9a9aa5f24",
"format": 1
},
{
@@ -5198,7 +5345,7 @@
"name": "plugins/doc_fragments/azure.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f2146d8b6b05f3dc1571069061663ff89f7527880e36a7f4fc4ac2e87ef0ee68",
+ "chksum_sha256": "f7203fe2730a03b98ee8106966dd80dce7ce8ce4a3e4decd9c106b5462b843ba",
"format": 1
},
{
@@ -5226,7 +5373,7 @@
"name": "plugins/module_utils/azure_rm_common.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0f2d416d8b73431c47251128c3a0b7eee12c412cc63f951ffc64caace37920ac",
+ "chksum_sha256": "f7fb7925b32c02007adcd2bbf2359813376f358976d59ca315586f27908b2eff",
"format": 1
},
{
@@ -5324,7 +5471,7 @@
"name": "plugins/modules/azure_rm_adapplication_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e04053b0c584772df10dcd46845772eefb57116b655ced27ee6786fff8334f12",
+ "chksum_sha256": "7f24aa5ded46894cd49b39c28e79b6abbdac4cb47c31ad982667b0ce439e7d2b",
"format": 1
},
{
@@ -5387,7 +5534,7 @@
"name": "plugins/modules/azure_rm_webapp_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a420da6e6d479985a088e2f2c614bc462ad003d5814d81e4a6db489aef906a83",
+ "chksum_sha256": "52040df37e0732e052577d7bca4f92bef544958f674f271d7154d0f9ccb52123",
"format": 1
},
{
@@ -5426,6 +5573,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8760f67750b1eb2994c66d2d8214206a46c153ec8242f93a3e795a3ee77b46d0",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_sqlfirewallrule_info.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -5436,7 +5590,7 @@
"name": "plugins/modules/azure_rm_adgroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "152a3792aa58f812d46db9dc8b8ebfd3c78abf0406297d4b4c274ba23b398a1e",
+ "chksum_sha256": "06eacd6abd4a6dd192976d020a1e0662dbb9049476fef925da7e51131736c129",
"format": 1
},
{
@@ -5447,6 +5601,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_storageaccountmanagementpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80c77fc072912a82ff2302343051ee8b7993da45d83f0f7eea51cd80beb616d5",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_firewallpolicy.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -5478,7 +5639,7 @@
"name": "plugins/modules/azure_rm_webappaccessrestriction_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c72ded459dbd8c58642c77aba5cfab56a86782f4e19958ded25f9754a32c1591",
+ "chksum_sha256": "d9349df2a2c45a49fecc4b74897269a65efc75e103f728875ebacafb9d1d27fa",
"format": 1
},
{
@@ -5534,7 +5695,7 @@
"name": "plugins/modules/azure_rm_aduser_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "59f317a97611776b87dd9d822f4234368027d5de8b98730851ba644ee9259031",
+ "chksum_sha256": "fbef98f5a2d79eda9fd59391c2a07c415fd87dee8d778a11923fb4f0ead02b6b",
"format": 1
},
{
@@ -5646,7 +5807,7 @@
"name": "plugins/modules/azure_rm_networkinterface.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d6f2859a51857b23dd42dc71b71005de7a540c1b8f68178232237a4c0ac06e14",
+ "chksum_sha256": "4c0051e0e4870a4cdab376c746118b7f6710c9420e4d7a49ebbff7c2d1630b53",
"format": 1
},
{
@@ -5800,7 +5961,7 @@
"name": "plugins/modules/azure_rm_cognitivesearch.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8665ce3ff083d18608c7ed9b5fb778d6248e9cfcc096b464535d041457d9af00",
+ "chksum_sha256": "e160b95200bd079eef2f992b4d452f6cbb3314e8668f4d65a63bdd7047bba5e9",
"format": 1
},
{
@@ -5828,14 +5989,21 @@
"name": "plugins/modules/azure_rm_virtualmachine.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b002dccc80c26c8960dc842680bbcce59ec55561a5678d73c5ab1eb248183ce0",
+ "chksum_sha256": "0b8c74e553344c6be40ff70367051cbd300f04e828e2ecaf46890662cc0b5d5f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97734732b444fd1f900cf6f72008101c80e68e863a48d76c5f31aae10d891a1c",
"format": 1
},
{
"name": "plugins/modules/azure_rm_backupazurevm.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "697611d8016ee00fdf81ee52c5d2174b670b3aa4a602498220cb4f1ffb455f9b",
+ "chksum_sha256": "6755df5a512af321a7f368cee44850e3107677fdecb549a4b7836d5bdff26651",
"format": 1
},
{
@@ -5874,6 +6042,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_sqlmidatabase_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "346bd24a4823dac19815993673f907de4dd715459aca506e2656d20193d3771d",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_iotdevice.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -5930,6 +6105,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a43710e515eb2a2d39aff332680a870e9a2375f56750dcccdc7770c1df28a74c",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_containerregistryreplication_info.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -6003,7 +6185,7 @@
"name": "plugins/modules/azure_rm_postgresqlflexibleserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea3b2ce15955406227c0367da00cebae6c44e18b9ef0c47720367e4fdd9397fc",
+ "chksum_sha256": "cf3b99161d8f3db99bb8dd2f1bb5be099217e33f56c00dc32c00bb24312d9acd",
"format": 1
},
{
@@ -6084,6 +6266,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_localnetworkgateway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "adc79bbfab386101bdaa959bc83939b389b50c3a232f5cdc7cdbd5a5a52c47c1",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_ddosprotectionplan_info.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -6192,7 +6381,7 @@
"name": "plugins/modules/azure_rm_aksagentpool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "35eede66b4539d8ebfabaeba04dc70f75614d3bc94b2d19a721b527269e44404",
+ "chksum_sha256": "7a35e2d99594b65d9856cdc8f55077d538014a5b150f2432110aec31650097f3",
"format": 1
},
{
@@ -6234,7 +6423,7 @@
"name": "plugins/modules/azure_rm_aks.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d63a516ee5a970d80bea017097e3d474a59b34c0dc58f0d0ecaced542dfbb5fa",
+ "chksum_sha256": "4856852175a9ed9393112b4b6b9cc78d8a7cedd8cdc22af8f1fe8a9c28e95c53",
"format": 1
},
{
@@ -6420,6 +6609,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_localnetworkgateway_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "933fe6f6ef580ae3027f0baf07ea18f02612f1f13593a0f63c7650f9464cea48",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_mariadbdatabase.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -6549,7 +6745,7 @@
"name": "plugins/modules/azure_rm_webapp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3905f0d95ea3661bb716b8b1f6c7d0806ce92cb5eeec61824d0ec8657d75824f",
+ "chksum_sha256": "3351258e14bcaee9b5b80e739adf8be765ed9e3d2a0d61fa78066bcf2b9492d1",
"format": 1
},
{
@@ -6563,7 +6759,7 @@
"name": "plugins/modules/azure_rm_postgresqlflexibleserver_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8be887b3634bf62a040d6ffbb5d1d5317d672761144d3967856a574cac4c7f17",
+ "chksum_sha256": "72a200cab9e9e88c9fe97ef18a232470582343ee92a6a2076b861fa5769f4454",
"format": 1
},
{
@@ -6728,6 +6924,20 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_vmsku_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9879aed3be63a091c6751c73f5c975958654d93dcd85e51bc57ff548fcd83761",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/azure_rm_sqlmidatabase.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73afdcfe76387142017c4cea8bf830840a3466aff5f1b88da9e30d48665aef17",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_recoveryservicesvault.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -6735,6 +6945,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e7b7b26dfc0f117e522521801d6e0219796b0bfa4752cb708eb80ff54910fd5",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_postgresqlfirewallrule.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -6745,7 +6962,7 @@
"name": "plugins/modules/azure_rm_aksagentpool_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "92c6e3a0f537164ae87f3cf64dd071238a5c8349d7ca84a16826720fc5b82e44",
+ "chksum_sha256": "8fa9bcb4e95c7eb0832f15411c90bdfbd98fa3f7aa4fe18b8e393aa579a99277",
"format": 1
},
{
@@ -6794,7 +7011,7 @@
"name": "plugins/modules/azure_rm_webappaccessrestriction.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a31793686f398ccb3ea7c32aad8341b808e06449bfe5ed468303d74a5406cf2a",
+ "chksum_sha256": "94a2b9c1365943cd7e693801205a1b35a2c4964d41ab626efce5a673c2f7714f",
"format": 1
},
{
@@ -6829,21 +7046,14 @@
"name": "plugins/modules/azure_rm_aduser.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f455cefd4a7af853f672ef7bffbc09422cd9f659841ac35ddc60a4a29c5cde65",
+ "chksum_sha256": "a81615ef44bc971af8f40ebe36ad5a506540f2ac9de62750cd6059eb1253a4ec",
"format": 1
},
{
"name": "plugins/modules/azure_rm_loadbalancer.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "40bcb2a088750276d350e5170a7993a5e92117948835d6bd30dcf5989db2cf11",
- "format": 1
- },
- {
- "name": "plugins/modules/azure_rm_datalakestore_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64ced3d20472f4e29c76baed328f40e9bc1483583f04fbf7e69e9346b5ed07ad",
+ "chksum_sha256": "3d82e7614ae63f4a2b5b6e5c5154621a51f145431cfc7f8301e7a527c7ad1d49",
"format": 1
},
{
@@ -6892,7 +7102,14 @@
"name": "plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d79dad68277ae5d3c8c72e1265606e28b17e19c26d2dd94452723a08b22e490d",
+ "chksum_sha256": "6082701501e00cb0ae70bc7e6e6bc41f801688ec9410ed9b3b792298ee1b1182",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d9987c45827fa732438ce3191a5f130ab1c2a4329010c3b5d1afe2cebac3e3e",
"format": 1
},
{
@@ -6913,7 +7130,7 @@
"name": "plugins/modules/azure_rm_virtualmachinescalesetinstance.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "29c4528fce1aea43abda1fed65f9d49ef13b592592174c2305fd581e62cdcf8d",
+ "chksum_sha256": "3bc41c6bdf4497388092f31fa2f93a1714fee34bc31400014d3e48a806f688f9",
"format": 1
},
{
@@ -6973,17 +7190,10 @@
"format": 1
},
{
- "name": "plugins/modules/azure_rm_datalakestore.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da55af371c87610112c5ff6dd9d8a1c1e3fa38ffd2e3bf8d7887f9a0dfdfd03b",
- "format": 1
- },
- {
"name": "plugins/modules/azure_rm_adapplication.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "49e82eb39f31d45623c7713aa5e6f1a8ed2cb7eeffbd6eeca67a19ac8283687a",
+ "chksum_sha256": "ca40adcbe28168f7543c9cd8c591351797010759b7bb261e4bf6287b1114efde",
"format": 1
},
{
@@ -7060,7 +7270,7 @@
"name": "plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8db7ef3ea60e698e7b5a65be465d6a639927cc63d91a2da61e1cb6a5f89fe7b8",
+ "chksum_sha256": "312142b7cbb5bc5885faa3150c6b4f4facbd8947e941ca3471b8c3db11e11b68",
"format": 1
},
{
@@ -7123,7 +7333,7 @@
"name": "plugins/modules/azure_rm_adgroup_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c2802b5f00ffe0edaf458e13a4929c6949f61d4d75e1d2b965445eb923fdf206",
+ "chksum_sha256": "0ca71fa39c2fec612f47f1882439eddfa830d7876ebe83e41f6cd649566e6a3d",
"format": 1
},
{
@@ -7169,6 +7379,13 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1542e5381089973684f8b436c6450dc76c42ea86823a5365b8c7a92d14c9adae",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_functionapp.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -7207,7 +7424,7 @@
"name": "plugins/modules/azure_rm_accesstoken_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "05f2949f236a8f79c474c1caa57df258695e9f0a87f9072abe4aef9ca1d323d6",
+ "chksum_sha256": "fa14d907bbb728e39ba2d2d641bde7432086f82eb8418939535c5020eed063d4",
"format": 1
},
{
@@ -7260,10 +7477,17 @@
"format": 1
},
{
+ "name": "plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96b890442e5096495fb8a20e2b9433fd6264be726bc2d6d7ec505c0971487a9b",
+ "format": 1
+ },
+ {
"name": "plugins/modules/azure_rm_rediscache_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a10c697c6832b6d01d4c3645819ee5a8f254715d5b5ac4bdd8db94dbe8fa5602",
+ "chksum_sha256": "1077222318629d3d75a3cb33f028d94a9dd244de03f8e1c365543191475f0100",
"format": 1
},
{
@@ -7333,7 +7557,7 @@
"name": "plugins/inventory/azure_rm.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "47b59006cb06078a81321afbd3b443cb9255af87a78989411a8d42c61f9b66ec",
+ "chksum_sha256": "618f746d6c7754062d64d74b0d062ea2c95a7df483b8c264fe035383fffb6a9a",
"format": 1
},
{
@@ -7347,7 +7571,7 @@
"name": "plugins/lookup/azure_keyvault_secret.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "80c27a8f8599afe6734f00e807a39e293e3569cf09517d2e09254996fe8c8818",
+ "chksum_sha256": "6d83b837326b17f2346dc2ce8d5e8fc49b691d99ac19e7a38960632e68b16969",
"format": 1
},
{
@@ -7361,28 +7585,28 @@
"name": "CONTRIBUTING.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "290a3dfba9b53aecbf86badacfdcb7f320c9a924e02a8f0b4824785a551d0a39",
+ "chksum_sha256": "d8deff7133f403ec517ca4c28704781f3949ec6036dafaa664ad92f15672c625",
"format": 1
},
{
- "name": "azure-pipelines.yml",
+ "name": "sanity-requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f55cf75a25502f8322df2fc8640e52f85fed4459b8c26fee3c75295f9c45552",
+ "chksum_sha256": "62326475a7ffd00afcdaf8a3d89158f778593e56e8b694a388daf9f523b53648",
"format": 1
},
{
- "name": "CredScanSuppressions.json",
+ "name": "azure-pipelines.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "98c0ee6a0b7a115787cf109a8c0e2ea4a61df6751ecda37915b22ffb44a1128d",
+ "chksum_sha256": "1f55cf75a25502f8322df2fc8640e52f85fed4459b8c26fee3c75295f9c45552",
"format": 1
},
{
- "name": "sanity-requirements-azure.txt",
+ "name": "CredScanSuppressions.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "62326475a7ffd00afcdaf8a3d89158f778593e56e8b694a388daf9f523b53648",
+ "chksum_sha256": "98c0ee6a0b7a115787cf109a8c0e2ea4a61df6751ecda37915b22ffb44a1128d",
"format": 1
},
{
@@ -7396,7 +7620,7 @@
"name": "pr-pipelines.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ce013c171143675f4d44e20b4b7d3907c5cee12c53f055c2c3bf6edebb1342ea",
+ "chksum_sha256": "7d55584268373d432d198db717c9da339c660ff169d27ff7dfa665f317d933a2",
"format": 1
},
{
@@ -7407,6 +7631,13 @@
"format": 1
},
{
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83fee6c4cd46119ab129ecb1012323a4e3acef73fcc5c4018cbd1aa89d6dca7a",
+ "format": 1
+ },
+ {
"name": "LICENSE",
"ftype": "file",
"chksum_type": "sha256",
diff --git a/ansible_collections/azure/azcollection/MANIFEST.json b/ansible_collections/azure/azcollection/MANIFEST.json
index c78a03285..7cff91198 100644
--- a/ansible_collections/azure/azcollection/MANIFEST.json
+++ b/ansible_collections/azure/azcollection/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "azure",
"name": "azcollection",
- "version": "2.3.0",
+ "version": "2.4.0",
"authors": [
"Microsoft <ansible@microsoft.com>"
],
@@ -31,7 +31,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "27255da19a036c3db65d132f41dc1d28980b84543f99724b5d1e8d529804eabe",
+ "chksum_sha256": "639f4d22a875902471bb829d432cc8ce23d4cd45442be62e49782873e19670be",
"format": 1
},
"format": 1
diff --git a/ansible_collections/azure/azcollection/README.md b/ansible_collections/azure/azcollection/README.md
index cdc2953a6..196e4ee7a 100644
--- a/ansible_collections/azure/azcollection/README.md
+++ b/ansible_collections/azure/azcollection/README.md
@@ -24,7 +24,7 @@ ansible-galaxy collection install azure.azcollection
Install dependencies required by the collection (adjust path to collection if necessary):
```bash
-pip3 install -r ~/.ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt
+pip3 install -r ~/.ansible/collections/ansible_collections/azure/azcollection/requirements.txt
```
To upgrade to the latest version of Azure collection:
diff --git a/ansible_collections/azure/azcollection/meta/execution-environment.yml b/ansible_collections/azure/azcollection/meta/execution-environment.yml
index 08fbf66a3..283dbe334 100644
--- a/ansible_collections/azure/azcollection/meta/execution-environment.yml
+++ b/ansible_collections/azure/azcollection/meta/execution-environment.yml
@@ -1,3 +1,3 @@
dependencies:
- python: requirements-azure.txt
+ python: requirements.txt
version: 1
diff --git a/ansible_collections/azure/azcollection/meta/runtime.yml b/ansible_collections/azure/azcollection/meta/runtime.yml
index 5870f6d89..2da8ce1a3 100644
--- a/ansible_collections/azure/azcollection/meta/runtime.yml
+++ b/ansible_collections/azure/azcollection/meta/runtime.yml
@@ -2,6 +2,7 @@
requires_ansible: '>=2.14.0'
action_groups:
all:
+ - azure.azcollection.azure_rm_accesstoken_info
- azure.azcollection.azure_rm_account_info
- azure.azcollection.azure_rm_adapplication
- azure.azcollection.azure_rm_adapplication_info
@@ -18,6 +19,7 @@ action_groups:
- azure.azcollection.azure_rm_aksagentpool
- azure.azcollection.azure_rm_aksagentpool_info
- azure.azcollection.azure_rm_aksagentpoolversion_info
+ - azure.azcollection.azure_rm_akscredentials_info
- azure.azcollection.azure_rm_aksupgrade_info
- azure.azcollection.azure_rm_aksversion_info
- azure.azcollection.azure_rm_apimanagement
@@ -47,6 +49,7 @@ action_groups:
- azure.azcollection.azure_rm_bastionhost
- azure.azcollection.azure_rm_bastionhost_info
- azure.azcollection.azure_rm_batchaccount
+ - azure.azcollection.azure_rm_batchaccount_info
- azure.azcollection.azure_rm_cdnendpoint
- azure.azcollection.azure_rm_cdnendpoint_info
- azure.azcollection.azure_rm_cdnprofile
@@ -67,8 +70,6 @@ action_groups:
- azure.azcollection.azure_rm_cosmosdbaccount_info
- azure.azcollection.azure_rm_datafactory
- azure.azcollection.azure_rm_datafactory_info
- - azure.azcollection.azure_rm_datalakestore
- - azure.azcollection.azure_rm_datalakestore_info
- azure.azcollection.azure_rm_ddosprotectionplan
- azure.azcollection.azure_rm_ddosprotectionplan_info
- azure.azcollection.azure_rm_deployment
@@ -169,12 +170,20 @@ action_groups:
- azure.azcollection.azure_rm_notificationhub_info
- azure.azcollection.azure_rm_openshiftmanagedcluster
- azure.azcollection.azure_rm_openshiftmanagedcluster_info
+ - azure.azcollection.azure_rm_openshiftmanagedclusterkubeconfig_info
- azure.azcollection.azure_rm_postgresqlconfiguration
- azure.azcollection.azure_rm_postgresqlconfiguration_info
- azure.azcollection.azure_rm_postgresqldatabase
- azure.azcollection.azure_rm_postgresqldatabase_info
- azure.azcollection.azure_rm_postgresqlfirewallrule
- azure.azcollection.azure_rm_postgresqlfirewallrule_info
+ - azure.azcollection.azure_rm_postgresqlflexibleconfiguration_info
+ - azure.azcollection.azure_rm_postgresqlflexibledatabase
+ - azure.azcollection.azure_rm_postgresqlflexibledatabase_info
+ - azure.azcollection.azure_rm_postgresqlflexiblefirewallrule
+ - azure.azcollection.azure_rm_postgresqlflexiblefirewallrule_info
+ - azure.azcollection.azure_rm_postgresqlflexibleserver
+ - azure.azcollection.azure_rm_postgresqlflexibleserver_info
- azure.azcollection.azure_rm_postgresqlserver
- azure.azcollection.azure_rm_postgresqlserver_info
- azure.azcollection.azure_rm_privatednsrecordset
@@ -195,6 +204,8 @@ action_groups:
- azure.azcollection.azure_rm_proximityplacementgroup_info
- azure.azcollection.azure_rm_publicipaddress
- azure.azcollection.azure_rm_publicipaddress_info
+ - azure.azcollection.azure_rm_publicipprefix
+ - azure.azcollection.azure_rm_publicipprefix_info
- azure.azcollection.azure_rm_recoveryservicesvault
- azure.azcollection.azure_rm_recoveryservicesvault_info
- azure.azcollection.azure_rm_rediscache
@@ -231,10 +242,12 @@ action_groups:
- azure.azcollection.azure_rm_sqlelasticpool_info
- azure.azcollection.azure_rm_sqlfirewallrule
- azure.azcollection.azure_rm_sqlfirewallrule_info
- - azure.azcollection.azure_rm_sqlserver
- - azure.azcollection.azure_rm_sqlserver_info
- azure.azcollection.azure_rm_sqlmanagedinstance
- azure.azcollection.azure_rm_sqlmanagedinstance_info
+ - azure.azcollection.azure_rm_sqlserver
+ - azure.azcollection.azure_rm_sqlserver_info
+ - azure.azcollection.azure_rm_sshpublickey
+ - azure.azcollection.azure_rm_sshpublickey_info
- azure.azcollection.azure_rm_storageaccount
- azure.azcollection.azure_rm_storageaccount_info
- azure.azcollection.azure_rm_storageblob
diff --git a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py
index 74c1286ac..9fc975084 100644
--- a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py
+++ b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py
@@ -134,7 +134,7 @@ options:
requirements:
- python >= 2.7
- The host that executes this module must have the azure.azcollection collection installed via galaxy
- - All python packages listed in collection's requirements-azure.txt must be installed via pip on the host that executes modules from azure.azcollection
+ - All python packages listed in collection's requirements.txt must be installed via pip on the host that executes modules from azure.azcollection
- Full installation instructions may be found https://galaxy.ansible.com/azure/azcollection
notes:
diff --git a/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py b/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py
index 12970dec3..0da9d4fcb 100644
--- a/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py
+++ b/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py
@@ -602,7 +602,8 @@ class AzureHost(object):
# set nic-related values from the primary NIC first
for nic in sorted(self.nics, key=lambda n: n.is_primary, reverse=True):
# and from the primary IP config per NIC first
- for ipc in sorted(nic._nic_model['properties']['ipConfigurations'], key=lambda i: i['properties'].get('primary', False), reverse=True):
+ for ipc in sorted(nic._nic_model.get('properties', {}).get('ipConfigurations', []),
+ key=lambda i: i.get('properties', {}).get('primary', False), reverse=True):
try:
subnet = ipc['properties'].get('subnet')
if subnet:
diff --git a/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py b/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py
index 5e693e4b3..6a6dd8f10 100644
--- a/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py
+++ b/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py
@@ -34,6 +34,8 @@ options:
description: Tenant id of service principal.
use_msi:
description: MSI token autodiscover, default is true.
+ cloud_type:
+ description: Specify which cloud, such as C(azure), C(usgovcloudapi).
notes:
- If version is not provided, this plugin will return the latest version of the secret.
- If ansible is running on Azure Virtual Machine with MSI enabled, client_id, secret and tenant isn't required.
@@ -51,6 +53,10 @@ EXAMPLE = """
debug:
msg: msg: "{{ lookup('azure.azcollection.azure_keyvault_secret', 'testsecret', vault_url=key_vault_uri)}}"
+- name: Look up secret with cloud type
+ debug:
+ msg: msg: "{{ lookup('azure.azcollection.azure_keyvault_secret', 'testsecret', cloud_type='usgovcloudapi', vault_url=key_vault_uri)}}"
+
- name: Look up secret when ansible host is MSI enabled Azure VM
debug:
msg: "the value of this secret is {{
@@ -133,15 +139,6 @@ TOKEN_ACQUIRED = False
logger = logging.getLogger("azure.identity").setLevel(logging.ERROR)
-token_params = {
- 'api-version': '2018-02-01',
- 'resource': 'https://vault.azure.net'
-}
-
-token_headers = {
- 'Metadata': 'true'
-}
-
def lookup_secret_non_msi(terms, vault_url, kwargs):
@@ -178,6 +175,15 @@ class LookupModule(LookupBase):
TOKEN_ACQUIRED = False
token = None
+ token_params = {
+ 'api-version': '2018-02-01',
+ 'resource': 'https://vault.{0}.net'.format(kwargs.get('cloud_type', 'azure'))
+ }
+
+ token_headers = {
+ 'Metadata': 'true'
+ }
+
if use_msi:
try:
token_res = requests.get('http://169.254.169.254/metadata/identity/oauth2/token',
diff --git a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py
index 79b5167b1..c747fc72c 100644
--- a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py
+++ b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py
@@ -279,8 +279,6 @@ try:
except ImportError:
import azure.mgmt.recoveryservicesbackup.activestamp.models as RecoveryServicesBackupModels
from azure.mgmt.search import SearchManagementClient
- from azure.mgmt.datalake.store import DataLakeStoreAccountManagementClient
- import azure.mgmt.datalake.store.models as DataLakeStoreAccountModel
from azure.mgmt.notificationhubs import NotificationHubsManagementClient
from azure.mgmt.eventhub import EventHubManagementClient
from azure.mgmt.datafactory import DataFactoryManagementClient
@@ -704,6 +702,12 @@ class AzureRMModuleBase(object):
account = self.storage_client.storage_accounts.get_properties(resource_group_name=resource_group_name, account_name=storage_account_name)
if auth_mode == 'login' and self.azure_auth.credentials.get('credential'):
credential = self.azure_auth.credentials['credential']
+ elif (auth_mode == 'login' and self.azure_auth.credentials.get('tenant')
+ and self.azure_auth.credentials.get('client_id')
+ and self.azure_auth.credentials.get('secret')):
+ credential = client_secret.ClientSecretCredential(tenant_id=self.azure_auth.credentials.get('tenant'),
+ client_id=self.azure_auth.credentials.get('client_id'),
+ client_secret=self.azure_auth.credentials.get('secret'))
else:
account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name=resource_group_name, account_name=storage_account_name)
credential = account_keys.keys[0].value
@@ -947,11 +951,17 @@ class AzureRMModuleBase(object):
def _ansible_get_models(self, *arg, **kwarg):
return self._ansible_models
- setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ try:
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ except AttributeError:
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__)._models)
client.models = types.MethodType(_ansible_get_models, client)
if self.azure_auth._cert_validation_mode == 'ignore':
- client._config.session_configuration_callback = self._validation_ignore_callback
+ if hasattr(client, '_config'):
+ client._config.session_configuration_callback = self._validation_ignore_callback
+ else:
+ client.config.session_configuration_callback = self._validation_ignore_callback
return client
@@ -1361,19 +1371,6 @@ class AzureRMModuleBase(object):
return self._search_client
@property
- def datalake_store_client(self):
- self.log('Getting datalake store client...')
- if not self._datalake_store_client:
- self._datalake_store_client = self.get_mgmt_svc_client(DataLakeStoreAccountManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2016-11-01')
- return self._datalake_store_client
-
- @property
- def datalake_store_models(self):
- return DataLakeStoreAccountModel
-
- @property
def notification_hub_client(self):
self.log('Getting notification hub client')
if not self._notification_hub_client:
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py
index cf9569868..c020d4dd1 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py
@@ -95,7 +95,7 @@ class AzureRMAccessToken(AzureRMModuleBase):
super(AzureRMAccessToken, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False,
- is_ad_resource=False)
+ is_ad_resource=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py
index b428463aa..33270da27 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py
@@ -146,33 +146,101 @@ options:
optional_claims:
description:
- Declare the optional claims for the application.
- type: list
- elements: dict
+ type: dict
suboptions:
- name:
- description:
- - The name of the optional claim.
- type: str
- required: True
- source:
+            access_token_claims:
description:
- - The source (directory object) of the claim.
- - There are predefined claims and user-defined claims from extension properties.
- - If the source value is null, the claim is a predefined optional claim.
- - If the source value is user, the value in the name property is the extension property from the user object.
- type: str
- essential:
+ - The optional claims returned in the JWT access token
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the optional claim.
+ type: str
+ required: True
+ source:
+ description:
+ - The source (directory object) of the claim.
+ - There are predefined claims and user-defined claims from extension properties.
+ - If the source value is null, the claim is a predefined optional claim.
+ - If the source value is user, the value in the name property is the extension property from the user object.
+ type: str
+ essential:
+ description:
+ - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience\
+ for the specific task requested by the end user.
+ - The default value is false.
+ default: false
+ type: bool
+ additional_properties:
+ description:
+ - Additional properties of the claim.
+ - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
+ type: list
+ elements: str
+ id_token_claims:
description:
- - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience
- for the specific task requested by the end user.
- - The default value is false.
- default: false
- type: bool
- additional_properties:
+ - The optional claims returned in the JWT ID token
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the optional claim.
+ type: str
+ required: True
+ source:
+ description:
+ - The source (directory object) of the claim.
+ - There are predefined claims and user-defined claims from extension properties.
+ - If the source value is null, the claim is a predefined optional claim.
+ - If the source value is user, the value in the name property is the extension property from the user object.
+ type: str
+ essential:
+ description:
+ - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience\
+ for the specific task requested by the end user.
+ - The default value is false.
+ default: false
+ type: bool
+ additional_properties:
+ description:
+ - Additional properties of the claim.
+ - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
+ type: list
+ elements: str
+ saml2_token_claims:
description:
- - Additional properties of the claim.
- - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
- type: str
+ - The optional claims returned in the SAML token
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the optional claim.
+ type: str
+ required: True
+ source:
+ description:
+ - The source (directory object) of the claim.
+ - There are predefined claims and user-defined claims from extension properties.
+ - If the source value is null, the claim is a predefined optional claim.
+                            - If the source value is user, the value in the name property is the extension property from the user object.
+ type: str
+ essential:
+ description:
+ - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience\
+ for the specific task requested by the end user.
+ - The default value is false.
+ default: false
+ type: bool
+ additional_properties:
+ description:
+ - Additional properties of the claim.
+ - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
+ type: list
+ elements: str
password:
description:
- App password, aka 'client secret'.
@@ -293,87 +361,99 @@ EXAMPLES = '''
'''
RETURN = '''
-output:
+display_name:
+ description:
+ - Object's display name or its prefix.
+ type: str
+ returned: always
+ sample: fredAKSCluster
+app_id:
+ description:
+ - The application ID.
+ returned: always
+ type: str
+ sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+object_id:
+ description:
+ - Object ID of the application
+ returned: always
+ type: str
+ sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+sign_in_audience:
description:
- - Current state of the adapplication.
+ - The application can be used from any Azure AD tenants.
+ returned: always
+ type: str
+ sample: AzureADandPersonalMicrosoftAccount
+available_to_other_tenants:
+ description:
+ - The application can be used from any Azure AD tenants.
+ returned: always
+ type: str
+ sample: AzureADandPersonalMicrosoftAccount
+homepage:
+ description:
+ - The url where users can sign in and use your app.
+ returned: always
+ type: str
+ sample: null
+identifier_uris:
+ description:
+ - Space-separated unique URIs that Azure AD can use for this app.
+ returned: always
+ type: list
+ sample: []
+oauth2_allow_implicit_flow:
+ description:
+ - Whether to allow implicit grant flow for OAuth2.
+ returned: always
+ type: bool
+ sample: false
+public_client_reply_urls:
+ description:
+ - The public client redirect urls.
+ - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
+ returned: always
+ type: list
+ sample: []
+web_reply_urls:
+ description:
+ - The web redirect urls.
+ - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
+ returned: always
+ type: list
+ sample: []
+spa_reply_urls:
+ description:
+ - The spa redirect urls.
+ - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
+ returned: always
+ type: list
+ sample: []
+optional_claims:
+ description:
+ - Declare the optional claims for the application.
type: complex
- returned: awalys
+ returned: always
contains:
- display_name:
- description:
- - Object's display name or its prefix.
- type: str
- returned: always
- sample: fredAKSCluster
- app_id:
- description:
- - The application ID.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- object_id:
- description:
- - Object ID of the application
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- sign_in_audience:
- description:
- - The application can be used from any Azure AD tenants.
- returned: always
- type: str
- sample: AzureADandPersonalMicrosoftAccount
- available_to_other_tenants:
- description:
- - The application can be used from any Azure AD tenants.
- returned: always
- type: str
- sample: AzureADandPersonalMicrosoftAccount
- homepage:
- description:
- - The url where users can sign in and use your app.
- returned: always
- type: str
- sample: null
- identifier_uris:
+        access_token_claims:
description:
- - Space-separated unique URIs that Azure AD can use for this app.
- returned: always
+ - The optional claims returned in the JWT access token
type: list
- sample: []
- oauth2_allow_implicit_flow:
- description:
- - Whether to allow implicit grant flow for OAuth2.
returned: always
- type: bool
- sample: false
- optional_claims:
+            sample: [{'name': 'aud', 'source': null, 'essential': false, 'additional_properties': []}]
+ id_token_claims:
description:
- - The optional claims for the application.
- returned: always
+ - The optional claims returned in the JWT ID token
type: list
- sample: []
- public_client_reply_urls:
- description:
- - The public client redirect urls.
- - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
returned: always
- type: list
- sample: []
- web_reply_urls:
+            sample: [{'name': 'acct', 'source': null, 'essential': false, 'additional_properties': []}]
+ saml2_token_claims:
description:
- - The web redirect urls.
- - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
- returned: always
+ - The optional claims returned in the SAML token
type: list
- sample: []
- spa_reply_urls:
- description:
- - The spa redirect urls.
- - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
returned: always
- type: list
- sample: []
+            sample: [{'name': 'acct', 'source': null, 'essential': false, 'additional_properties': []}]
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
@@ -395,6 +475,8 @@ try:
from msgraph.generated.models.spa_application import SpaApplication
from msgraph.generated.models.public_client_application import PublicClientApplication
from msgraph.generated.models.implicit_grant_settings import ImplicitGrantSettings
+ from msgraph.generated.models.optional_claim import OptionalClaim
+ from msgraph.generated.models.optional_claims import OptionalClaims
except ImportError:
# This is handled in azure_rm_common
pass
@@ -419,7 +501,7 @@ app_role_spec = dict(
)
)
-optional_claims_spec = dict(
+claims_spec = dict(
name=dict(
type='str',
required=True
@@ -432,9 +514,11 @@ optional_claims_spec = dict(
default=False
),
additional_properties=dict(
- type='str'
+ type='list',
+ elements='str'
)
)
+
required_resource_accesses_spec = dict(
resource_app_id=dict(
type='str'
@@ -481,7 +565,14 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
key_value=dict(type='str', no_log=True),
native_app=dict(type='bool'),
oauth2_allow_implicit_flow=dict(type='bool'),
- optional_claims=dict(type='list', elements='dict', options=optional_claims_spec),
+ optional_claims=dict(
+ type='dict',
+ options=dict(
+ access_token_claims=dict(type='list', elements='dict', no_log=True, options=claims_spec),
+ id_token_claims=dict(type='list', elements='dict', no_log=True, options=claims_spec),
+ saml2_token_claims=dict(type='list', elements='dict', no_log=True, options=claims_spec),
+ )
+ ),
password=dict(type='str', no_log=True),
public_client_reply_urls=dict(type='list', elements='str'),
web_reply_urls=dict(type='list', elements='str', aliases=['reply_urls']),
@@ -559,6 +650,9 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
if self.app_roles:
app_roles = self.build_app_roles(self.app_roles)
+ if self.optional_claims:
+ optional_claims = self.build_optional_claims(self.optional_claims)
+
create_app = Application(
sign_in_audience=self.sign_in_audience,
web=WebApplication(
@@ -576,7 +670,7 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
password_credentials=password_creds,
required_resource_access=required_accesses,
app_roles=app_roles,
- optional_claims=self.optional_claims
+ optional_claims=optional_claims
# allow_guests_sign_in=self.allow_guests_sign_in,
)
response = asyncio.get_event_loop().run_until_complete(self.create_application(create_app))
@@ -603,6 +697,9 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
if self.app_roles:
app_roles = self.build_app_roles(self.app_roles)
+ if self.optional_claims:
+ optional_claims = self.build_optional_claims(self.optional_claims)
+
app_update_param = Application(
sign_in_audience=self.sign_in_audience,
web=WebApplication(
@@ -621,7 +718,7 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
required_resource_access=required_accesses,
# allow_guests_sign_in=self.allow_guests_sign_in,
app_roles=app_roles,
- optional_claims=self.optional_claims)
+ optional_claims=optional_claims)
asyncio.get_event_loop().run_until_complete(self.update_application(
obj_id=old_response['object_id'], update_app=app_update_param))
@@ -665,6 +762,15 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
return True
return False
+ def serialize_claims(self, claims):
+ if claims is None:
+ return None
+ return [{
+ "additional_properties": claim.additional_properties,
+ "essential": claim.essential,
+ "name": claim.name,
+ "source": claim.source} for claim in claims]
+
def to_dict(self, object):
app_roles = [{
'id': app_role.id,
@@ -673,6 +779,11 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
'value': app_role.value,
"description": app_role.description
} for app_role in object.app_roles]
+ optional_claims = {
+ "access_token": self.serialize_claims(object.optional_claims.access_token),
+ "id_token": self.serialize_claims(object.optional_claims.id_token),
+ "saml2_token": self.serialize_claims(object.optional_claims.saml2_token)
+ } if object.optional_claims is not None else object.optional_claims
return dict(
app_id=object.app_id,
object_id=object.id,
@@ -683,7 +794,7 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
homepage=object.web.home_page_url,
identifier_uris=object.identifier_uris,
oauth2_allow_implicit_flow=object.web.implicit_grant_settings.enable_access_token_issuance,
- optional_claims=object.optional_claims,
+ optional_claims=optional_claims,
# allow_guests_sign_in=object.allow_guests_sign_in,
web_reply_urls=object.web.redirect_uris,
spa_reply_urls=object.spa.redirect_uris,
@@ -762,6 +873,25 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
result.append(role)
return result
+ def build_optional_claims(self, optional_claims):
+
+ def build_claims(claims_dict):
+ if claims_dict is None:
+ return None
+ return [OptionalClaim(
+ essential=claim.get("essential"),
+ name=claim.get("name"),
+ source=claim.get("source"),
+ additional_properties=claim.get("additional_properties")
+ ) for claim in claims_dict]
+
+ claims = OptionalClaims(
+ access_token=build_claims(optional_claims.get("access_token_claims")),
+ id_token=build_claims(optional_claims.get("id_token_claims")),
+ saml2_token=build_claims(optional_claims.get("saml2_token_claims"))
+ )
+ return claims
+
async def create_application(self, creat_app):
return await self._client.applications.post(body=creat_app)
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
index 167b82552..e3eb53aac 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
@@ -129,6 +129,30 @@ applications:
returned: always
type: list
sample: []
+ optional_claims:
+ description:
+ - Declare the optional claims for the application.
+ type: complex
+ returned: always
+ contains:
+                access_token_claims:
+ description:
+ - The optional claims returned in the JWT access token
+ type: list
+ returned: always
+                    sample: [{'name': 'aud', 'source': null, 'essential': false, 'additional_properties': []}]
+ id_token_claims:
+ description:
+ - The optional claims returned in the JWT ID token
+ type: list
+ returned: always
+                    sample: [{'name': 'acct', 'source': null, 'essential': false, 'additional_properties': []}]
+ saml2_token_claims:
+ description:
+ - The optional claims returned in the SAML token
+ type: list
+ returned: always
+                    sample: [{'name': 'acct', 'source': null, 'essential': false, 'additional_properties': []}]
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase
@@ -191,8 +215,17 @@ class AzureRMADApplicationInfo(AzureRMModuleBase):
return self.results
+ def serialize_claims(self, claims):
+ if claims is None:
+ return None
+ return [{
+ "additional_properties": claim.additional_properties,
+ "essential": claim.essential,
+ "name": claim.name,
+ "source": claim.source} for claim in claims]
+
def to_dict(self, object):
- return dict(
+ response = dict(
app_id=object.app_id,
object_id=object.id,
app_display_name=object.display_name,
@@ -201,9 +234,16 @@ class AzureRMADApplicationInfo(AzureRMModuleBase):
sign_in_audience=object.sign_in_audience,
web_reply_urls=object.web.redirect_uris,
spa_reply_urls=object.spa.redirect_uris,
- public_client_reply_urls=object.public_client.redirect_uris
+ public_client_reply_urls=object.public_client.redirect_uris,
+ optional_claims=dict(access_token=[], id_token=[], saml2_token=[])
)
+ if object.optional_claims is not None:
+ response['optional_claims']['id_token'] = self.serialize_claims(object.optional_claims.id_token)
+ response['optional_claims']['saml2_token'] = self.serialize_claims(object.optional_claims.saml2_token)
+ response['optional_claims']['access_token'] = self.serialize_claims(object.optional_claims.access_token)
+ return response
+
async def get_application(self, obj_id):
return await self._client.applications.by_application_id(obj_id).get()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py
index 1693794a7..4f7f3ed5a 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py
@@ -63,6 +63,12 @@ options:
- The azure ad objects asserted to not be owners of the group.
type: list
elements: str
+ raw_membership:
+ description:
+ - By default the group_members return property is flattened and partially filtered of non-User objects before return. \
+ This argument disables those transformations.
+ default: false
+ type: bool
description:
description:
- An optional description for the group.
@@ -109,6 +115,15 @@ EXAMPLES = '''
- "{{ ad_object_1_object_id }}"
- "{{ ad_object_2_object_id }}"
+- name: Ensure Users are Members of a Group using object_id. Specify the group_membership return should be unfiltered
+ azure_rm_adgroup:
+ object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ state: 'present'
+ present_members:
+ - "{{ ad_object_1_object_id }}"
+ - "{{ ad_object_2_object_id }}"
+ raw_membership: true
+
- name: Ensure Users are not Members of a Group using display_name and mail_nickname
azure_rm_adgroup:
display_name: "Group-Name"
@@ -117,7 +132,7 @@ EXAMPLES = '''
absent_members:
- "{{ ad_object_1_object_id }}"
-- name: Ensure Users are Members of a Group using object_id
+- name: Ensure Users are not Members of a Group using object_id
azure_rm_adgroup:
object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
state: 'present'
@@ -150,7 +165,7 @@ EXAMPLES = '''
- "{{ ad_object_1_object_id }}"
- "{{ ad_object_2_object_id }}"
-- name: Ensure Users are Owners of a Group using object_id
+- name: Ensure Users are not Owners of a Group using object_id
azure_rm_adgroup:
object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
state: 'present'
@@ -203,7 +218,7 @@ group_owners:
type: list
group_members:
description:
- - The members of the group.
+ - The members of the group. If raw_membership is false, this contains the transitive members property. Otherwise, it contains the members property.
returned: always
type: list
description:
@@ -222,6 +237,7 @@ try:
from msgraph.generated.models.group import Group
from msgraph.generated.groups.item.transitive_members.transitive_members_request_builder import \
TransitiveMembersRequestBuilder
+ from msgraph.generated.groups.item.group_item_request_builder import GroupItemRequestBuilder
from msgraph.generated.models.reference_create import ReferenceCreate
except ImportError:
# This is handled in azure_rm_common
@@ -239,6 +255,7 @@ class AzureRMADGroup(AzureRMModuleBase):
present_owners=dict(type='list', elements='str'),
absent_members=dict(type='list', elements='str'),
absent_owners=dict(type='list', elements='str'),
+ raw_membership=dict(type='bool', default=False),
description=dict(type='str'),
state=dict(
type='str',
@@ -257,6 +274,7 @@ class AzureRMADGroup(AzureRMModuleBase):
self.state = None
self.results = dict(changed=False)
self._client = None
+ self.raw_membership = False
super(AzureRMADGroup, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=False,
@@ -267,9 +285,6 @@ class AzureRMADGroup(AzureRMModuleBase):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
- # TODO remove ad_groups return. Returns as one object always
- ad_groups = []
-
try:
self._client = self.get_msgraph_client()
ad_groups = []
@@ -280,12 +295,38 @@ class AzureRMADGroup(AzureRMModuleBase):
if ad_groups:
self.object_id = ad_groups[0].id
- elif self.object_id:
+ if self.object_id:
ad_groups = [asyncio.get_event_loop().run_until_complete(self.get_group(self.object_id))]
if ad_groups:
if self.state == "present":
self.results["changed"] = False
+
+ if self.description is not None and self.description != ad_groups[0].description:
+ self.results["changed"] = True
+ else:
+ self.description = ad_groups[0].description
+ if self.display_name is not None and self.display_name != ad_groups[0].display_name:
+ self.results["changed"] = True
+ else:
+ self.display_name = ad_groups[0].display_name
+ if self.mail_nickname is not None and self.mail_nickname != ad_groups[0].mail_nickname:
+ self.results["changed"] = True
+ else:
+ self.mail_nickname = ad_groups[0].mail_nickname
+ if self.results["changed"]:
+ group = Group(
+ mail_enabled=False,
+ security_enabled=True,
+ group_types=[],
+ display_name=self.display_name,
+ mail_nickname=self.mail_nickname,
+ description=self.description
+ )
+
+ asyncio.get_event_loop().run_until_complete(self.update_group(ad_groups[0].id, group))
+ ad_groups = [asyncio.get_event_loop().run_until_complete(self.get_group(self.object_id))]
+
elif self.state == "absent":
asyncio.get_event_loop().run_until_complete(self.delete_group(self.object_id))
ad_groups = []
@@ -325,7 +366,7 @@ class AzureRMADGroup(AzureRMModuleBase):
if self.present_members or self.absent_members:
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(group_id))
- current_members = [object.id for object in ret.value]
+ current_members = [object.id for object in ret]
if self.present_members:
present_members_by_object_id = self.dictionary_from_object_urls(self.present_members)
@@ -361,7 +402,7 @@ class AzureRMADGroup(AzureRMModuleBase):
if owners_to_add:
for owner_object_id in owners_to_add:
asyncio.get_event_loop().run_until_complete(
- self.add_gropup_owner(group_id, present_owners_by_object_id[owner_object_id]))
+ self.add_group_owner(group_id, present_owners_by_object_id[owner_object_id]))
self.results["changed"] = True
if self.absent_owners:
@@ -369,7 +410,7 @@ class AzureRMADGroup(AzureRMModuleBase):
if owners_to_remove:
for owner in owners_to_remove:
- asyncio.get_event_loop().run_until_complete(self.remove_gropup_owner(group_id, owner))
+ asyncio.get_event_loop().run_until_complete(self.remove_group_owner(group_id, owner))
self.results["changed"] = True
def dictionary_from_object_urls(self, object_urls):
@@ -439,10 +480,13 @@ class AzureRMADGroup(AzureRMModuleBase):
if results["object_id"] and (self.present_members or self.absent_members):
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(results["object_id"]))
- results["group_members"] = [self.result_to_dict(object) for object in ret.value]
+ results["group_members"] = [self.result_to_dict(object) for object in ret]
return results
+ async def update_group(self, group_id, group):
+ return await self._client.groups.by_group_id(group_id).patch(body=group)
+
async def create_group(self, create_group):
return await self._client.groups.post(body=create_group)
@@ -469,6 +513,12 @@ class AzureRMADGroup(AzureRMModuleBase):
return []
async def get_group_members(self, group_id, filters=None):
+ if self.raw_membership:
+ return await self.get_raw_group_members(group_id, filters)
+ else:
+ return await self.get_transitive_group_members(group_id, filters)
+
+ async def get_transitive_group_members(self, group_id, filters=None):
request_configuration = TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetRequestConfiguration(
query_parameters=TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetQueryParameters(
count=True,
@@ -476,8 +526,22 @@ class AzureRMADGroup(AzureRMModuleBase):
)
if filters:
request_configuration.query_parameters.filter = filters
- return await self._client.groups.by_group_id(group_id).transitive_members.get(
+ response = await self._client.groups.by_group_id(group_id).transitive_members.get(
request_configuration=request_configuration)
+ return response.value
+
+ async def get_raw_group_members(self, group_id, filters=None):
+ request_configuration = GroupItemRequestBuilder.GroupItemRequestBuilderGetRequestConfiguration(
+ query_parameters=GroupItemRequestBuilder.GroupItemRequestBuilderGetQueryParameters(
+ # this ensures service principals are returned
+ # see https://learn.microsoft.com/en-us/graph/api/group-list-members?view=graph-rest-1.0&tabs=http
+ expand=["members"]
+ ),
+ )
+ if filters:
+ request_configuration.query_parameters.filter = filters
+ group = await self._client.groups.by_group_id(group_id).get(request_configuration=request_configuration)
+ return group.members
async def add_group_member(self, group_id, obj_id):
request_body = ReferenceCreate(
@@ -496,13 +560,13 @@ class AzureRMADGroup(AzureRMModuleBase):
)
return await self._client.groups.by_group_id(group_id).owners.get(request_configuration=request_configuration)
- async def add_gropup_owner(self, group_id, obj_id):
+ async def add_group_owner(self, group_id, obj_id):
request_body = ReferenceCreate(
odata_id="https://graph.microsoft.com/v1.0/users/{0}".format(obj_id),
)
await self._client.groups.by_group_id(group_id).owners.ref.post(body=request_body)
- async def remove_gropup_owner(self, group_id, obj_id):
+ async def remove_group_owner(self, group_id, obj_id):
await self._client.groups.by_group_id(group_id).owners.by_directory_object_id(obj_id).ref.delete()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py
index 3525bdf1b..04393c02e 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py
@@ -55,6 +55,12 @@ options:
- Indicate whether the groups in which a groups is a member should be returned with the returned groups.
default: False
type: bool
+ raw_membership:
+ description:
+ - By default the group_members return property is flattened and partially filtered of non-User objects before return.\
+ This argument disables those transformations.
+ default: false
+ type: bool
all:
description:
- If True, will return all groups in tenant.
@@ -84,6 +90,12 @@ EXAMPLES = '''
object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
return_owners: true
return_group_members: true
+- name: Return a specific group using object_id and return the owners and members of the group. Return service principals and nested groups.
+ azure_rm_adgroup_info:
+ object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ return_owners: true
+ return_group_members: true
+ raw_membership: true
- name: Return a specific group using object_id and return the groups the group is a member of
azure_rm_adgroup_info:
@@ -153,7 +165,7 @@ group_owners:
type: list
group_members:
description:
- - The members of the group.
+ - The members of the group. If raw_membership is set, this field may contain non-user objects (groups, service principals, etc)
returned: always
type: list
description:
@@ -173,6 +185,7 @@ try:
TransitiveMembersRequestBuilder
from msgraph.generated.groups.item.get_member_groups.get_member_groups_post_request_body import \
GetMemberGroupsPostRequestBody
+ from msgraph.generated.groups.item.group_item_request_builder import GroupItemRequestBuilder
except ImportError:
# This is handled in azure_rm_common
pass
@@ -190,6 +203,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
return_owners=dict(type='bool', default=False),
return_group_members=dict(type='bool', default=False),
return_member_groups=dict(type='bool', default=False),
+ raw_membership=dict(type='bool', default=False),
all=dict(type='bool', default=False),
)
@@ -201,6 +215,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
self.return_owners = False
self.return_group_members = False
self.return_member_groups = False
+ self.raw_membership = False
self.all = False
self.results = dict(changed=False)
@@ -301,7 +316,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
if results["object_id"] and self.return_group_members:
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(results["object_id"]))
- results["group_members"] = [self.result_to_dict(object) for object in ret.value]
+ results["group_members"] = [self.result_to_dict(object) for object in ret]
if results["object_id"] and self.return_member_groups:
ret = asyncio.get_event_loop().run_until_complete(self.get_member_groups(results["object_id"]))
@@ -310,7 +325,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
if results["object_id"] and self.check_membership:
filter = "id eq '{0}' ".format(self.check_membership)
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(results["object_id"], filter))
- results["is_member_of"] = True if ret.value and len(ret.value) != 0 else False
+ results["is_member_of"] = True if ret and len(ret) != 0 else False
return results
@@ -352,17 +367,34 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
return await self._client.groups.by_group_id(group_id).owners.get(request_configuration=request_configuration)
async def get_group_members(self, group_id, filters=None):
+ if self.raw_membership:
+ return await self.get_raw_group_members(group_id, filters)
+ else:
+ return await self.get_transitive_group_members(group_id, filters)
+
+ async def get_transitive_group_members(self, group_id, filters=None):
request_configuration = TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetRequestConfiguration(
query_parameters=TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetQueryParameters(
count=True,
- select=['id', 'displayName', 'userPrincipalName', 'mailNickname', 'mail', 'accountEnabled', 'userType',
- 'appId', 'appRoleAssignmentRequired']
-
),
)
if filters:
request_configuration.query_parameters.filter = filters
- return await self._client.groups.by_group_id(group_id).transitive_members.get(
+ response = await self._client.groups.by_group_id(group_id).transitive_members.get(
+ request_configuration=request_configuration)
+ return response.value
+
+ async def get_raw_group_members(self, group_id, filters=None):
+ request_configuration = GroupItemRequestBuilder.GroupItemRequestBuilderGetRequestConfiguration(
+ query_parameters=GroupItemRequestBuilder.GroupItemRequestBuilderGetQueryParameters(
+ # this ensures service principals are returned
+ # see https://learn.microsoft.com/en-us/graph/api/group-list-members?view=graph-rest-1.0&tabs=http
+ expand=["members"]
+ ),
+ )
+ if filters:
+ request_configuration.query_parameters.filter = filters
+ return await self._client.groups.by_group_id(group_id).members.get(
request_configuration=request_configuration)
async def get_member_groups(self, obj_id):
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py
index 1e0a238c0..e1c792649 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py
@@ -119,6 +119,18 @@ options:
- The maximum length is 64 characters.Returned only on $select.
- Supports $filter (eq, ne, not, ge, le, in, startsWith, and eq on null values).
type: str
+ on_premises_extension_attributes:
+ description:
+ - Contains extensionAttributes1-15 for the user.
+ - These extension attributes are also known as Exchange custom attributes 1-15.
+ - For an onPremisesSyncEnabled user, the source of authority for this set of properties is the on-premises and is read-only.
+ - For a cloud-only user (where onPremisesSyncEnabled is false), these properties can be set during the creation or update of a user object.
+ - For a cloud-only user previously synced from on-premises Active Directory, these properties are read-only in Microsoft Graph\
+ but can be fully managed through the Exchange Admin Center or the Exchange Online V2 module in PowerShell.
+ type: dict
+ aliases:
+ - extension_attributes
+
extends_documentation_fragment:
- azure.azcollection.azure
@@ -143,6 +155,10 @@ EXAMPLES = '''
usage_location: "US"
mail: "{{ user_principal_name }}@contoso.com"
company_name: 'Test Company'
+ on_premises_extension_attributes:
+ extension_attribute1: "test_extension_attribute1"
+ extension_attribute2: "test_extension_attribute2"
+ extension_attribute11: "test_extension_attribute11"
- name: Update user with new value for account_enabled
azure_rm_aduser:
@@ -205,6 +221,17 @@ company_name:
type: str
returned: always
sample: 'Test Company'
+on_premises_extension_attributes:
+ description:
+ - Contains extensionAttributes1-15 for the user.
+ - These extension attributes are also known as Exchange custom attributes 1-15.
+ - For an onPremisesSyncEnabled user, the source of authority for this set of properties is the on-premises and is read-only.
+ - For a cloud-only user (where onPremisesSyncEnabled is false), these properties can be set during the creation or update of a user object.
+ - For a cloud-only user previously synced from on-premises Active Directory, these properties are read-only in Microsoft Graph\
+ but can be fully managed through the Exchange Admin Center or the Exchange Online V2 module in PowerShell.
+ type: dict
+ returned: always
+ sample: {}
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase
@@ -212,6 +239,7 @@ from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common
try:
import asyncio
from msgraph.generated.models.password_profile import PasswordProfile
+ from msgraph.generated.models.on_premises_extension_attributes import OnPremisesExtensionAttributes
from msgraph.generated.models.user import User
from msgraph.generated.users.users_request_builder import UsersRequestBuilder
except ImportError:
@@ -239,7 +267,8 @@ class AzureRMADUser(AzureRMModuleBase):
surname=dict(type='str'),
user_type=dict(type='str'),
mail=dict(type='str'),
- company_name=dict(type='str')
+ company_name=dict(type='str'),
+ on_premises_extension_attributes=dict(type='dict', aliases=['extension_attributes'])
)
self.user_principal_name = None
@@ -259,6 +288,7 @@ class AzureRMADUser(AzureRMModuleBase):
self.user_type = None
self.mail = None
self.company_name = None
+ self.on_premises_extension_attributes = None
self.log_path = None
self.log_mode = None
@@ -288,6 +318,13 @@ class AzureRMADUser(AzureRMModuleBase):
if self.state == 'present':
+ extension_attributes = None
+
+ if self.on_premises_extension_attributes:
+ extension_attributes = OnPremisesExtensionAttributes(
+ **self.on_premises_extension_attributes
+ )
+
if ad_user: # Update, changed
password = None
@@ -298,7 +335,6 @@ class AzureRMADUser(AzureRMModuleBase):
)
should_update = False
-
if self.on_premises_immutable_id and ad_user.on_premises_immutable_id != self.on_premises_immutable_id:
should_update = True
if should_update or self.usage_location and ad_user.usage_location != self.usage_location:
@@ -321,9 +357,12 @@ class AzureRMADUser(AzureRMModuleBase):
should_update = True
if should_update or self.company_name and ad_user.company_name != self.company_name:
should_update = True
-
+ if should_update or (
+ self.on_premises_extension_attributes and
+ self.on_premises_extension_attributes_to_dict(ad_user.on_premises_extension_attributes) != self.on_premises_extension_attributes):
+ should_update = True
if should_update:
- asyncio.get_event_loop().run_until_complete(self.update_user(ad_user, password))
+ asyncio.get_event_loop().run_until_complete(self.update_user(ad_user, password, extension_attributes))
self.results['changed'] = True
@@ -335,7 +374,7 @@ class AzureRMADUser(AzureRMModuleBase):
self.results['changed'] = False
else: # Create, changed
- asyncio.get_event_loop().run_until_complete(self.create_user())
+ asyncio.get_event_loop().run_until_complete(self.create_user(extension_attributes))
self.results['changed'] = True
ad_user = self.get_exisiting_user()
@@ -391,6 +430,16 @@ class AzureRMADUser(AzureRMModuleBase):
raise
return ad_user
+ def on_premises_extension_attributes_to_dict(self, on_premises_extension_attributes):
+ extension_attributes = {}
+ for index in range(1, 16 + 1):
+ attribute_name = f'extension_attribute{index}'
+ if hasattr(on_premises_extension_attributes, attribute_name):
+ attr_value = getattr(on_premises_extension_attributes, attribute_name)
+ if attr_value is not None:
+ extension_attributes[attribute_name] = attr_value
+ return extension_attributes
+
def to_dict(self, object):
return dict(
object_id=object.id,
@@ -400,10 +449,11 @@ class AzureRMADUser(AzureRMModuleBase):
mail=object.mail,
account_enabled=object.account_enabled,
user_type=object.user_type,
- company_name=object.company_name
+ company_name=object.company_name,
+ on_premises_extension_attributes=self.on_premises_extension_attributes_to_dict(object.on_premises_extension_attributes)
)
- async def update_user(self, ad_user, password):
+ async def update_user(self, ad_user, password, extension_attributes):
request_body = User(
on_premises_immutable_id=self.on_premises_immutable_id,
usage_location=self.usage_location,
@@ -415,11 +465,12 @@ class AzureRMADUser(AzureRMModuleBase):
password_profile=password,
user_principal_name=self.user_principal_name,
mail_nickname=self.mail_nickname,
- company_name=self.company_name
+ company_name=self.company_name,
+ on_premises_extension_attributes=extension_attributes
)
return await self._client.users.by_user_id(ad_user.id).patch(body=request_body)
- async def create_user(self):
+ async def create_user(self, extension_attributes):
password = PasswordProfile(
password=self.password_profile
)
@@ -435,7 +486,8 @@ class AzureRMADUser(AzureRMModuleBase):
surname=self.surname,
user_type=self.user_type,
mail=self.mail,
- company_name=self.company_name
+ company_name=self.company_name,
+ on_premises_extension_attributes=extension_attributes
)
return await self._client.users.post(body=request_body)
@@ -446,7 +498,8 @@ class AzureRMADUser(AzureRMModuleBase):
request_configuration = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName", "userType",
- "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName"]
+ "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName",
+ "OnPremisesExtensionAttributes"]
),
)
return await self._client.users.by_user_id(object).get(request_configuration=request_configuration)
@@ -457,7 +510,8 @@ class AzureRMADUser(AzureRMModuleBase):
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
filter=filter,
select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
- "userType", "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName"],
+ "userType", "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName",
+ "OnPremisesExtensionAttributes"],
count=True
),
headers={'ConsistencyLevel': "eventual", }
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py
index 98c30be57..e71066a89 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py
@@ -143,6 +143,17 @@ company_name:
type: str
returned: always
sample: "Test Company"
+on_premises_extension_attributes:
+ description:
+ - Contains extensionAttributes1-15 for the user.
+ - These extension attributes are also known as Exchange custom attributes 1-15.
+ - For an onPremisesSyncEnabled user, the source of authority for this set of properties is the on-premises and is read-only.
+ - For a cloud-only user (where onPremisesSyncEnabled is false), these properties can be set during the creation or update of a user object.
+ - For a cloud-only user previously synced from on-premises Active Directory, these properties are read-only in Microsoft Graph\
+ but can be fully managed through the Exchange Admin Center or the Exchange Online V2 module in PowerShell.
+ type: dict
+ returned: always
+ sample: {}
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase
@@ -224,7 +235,6 @@ class AzureRMADUserInfo(AzureRMModuleBase):
elif self.all:
# this returns as a list, since we parse multiple pages
ad_users = asyncio.get_event_loop().run_until_complete(self.get_users())
-
self.results['ad_users'] = [self.to_dict(user) for user in ad_users]
except Exception as e:
@@ -232,6 +242,16 @@ class AzureRMADUserInfo(AzureRMModuleBase):
return self.results
+ def on_premises_extension_attributes_to_dict(self, on_premises_extension_attributes):
+ extension_attributes = {}
+ for index in range(1, 16 + 1):
+ attribute_name = f'extension_attribute{index}'
+ if hasattr(on_premises_extension_attributes, attribute_name):
+ attr_value = getattr(on_premises_extension_attributes, attribute_name)
+ if attr_value is not None:
+ extension_attributes[attribute_name] = attr_value
+ return extension_attributes
+
def to_dict(self, object):
return dict(
object_id=object.id,
@@ -241,13 +261,15 @@ class AzureRMADUserInfo(AzureRMModuleBase):
mail=object.mail,
account_enabled=object.account_enabled,
user_type=object.user_type,
- company_name=object.company_name
+ company_name=object.company_name,
+ on_premises_extension_attributes=self.on_premises_extension_attributes_to_dict(object.on_premises_extension_attributes)
)
async def get_user(self, object):
request_configuration = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
- select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName", "userType", "companyName"]
+ select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
+ "userType", "companyName", "onPremisesExtensionAttributes"]
),
)
return await self._client.users.by_user_id(object).get(request_configuration=request_configuration)
@@ -255,7 +277,8 @@ class AzureRMADUserInfo(AzureRMModuleBase):
async def get_users(self):
request_configuration = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
- select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName", "userType", "companyName"]
+ select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
+ "userType", "companyName", "onPremisesExtensionAttributes"]
),
)
users = []
@@ -276,7 +299,7 @@ class AzureRMADUserInfo(AzureRMModuleBase):
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
filter=filter,
select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
- "userType", "companyName"],
+ "userType", "companyName", "onPremisesExtensionAttributes"],
count=True
),
))
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py
index 0fb5095fe..0e1565a2c 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py
@@ -15,6 +15,8 @@ version_added: "0.1.2"
short_description: Manage a managed Azure Container Service (AKS) instance
description:
- Create, update and delete a managed Azure Container Service (AKS) instance.
+ - You can only specify C(identity) or C(service_principal), not both. If you don't specify either it will
+ default to identity->type->SystemAssigned.
options:
resource_group:
@@ -170,7 +172,7 @@ options:
type: str
service_principal:
description:
- - The service principal suboptions. If not provided - use system-assigned managed identity.
+ - The service principal suboptions.
type: dict
suboptions:
client_id:
@@ -182,6 +184,25 @@ options:
description:
- The secret password associated with the service principal.
type: str
+ identity:
+ description:
+ - Identity for the Server.
+ type: dict
+ version_added: '2.4.0'
+ suboptions:
+ type:
+ description:
+ - Type of the managed identity
+ required: false
+ choices:
+ - UserAssigned
+ - SystemAssigned
+ default: SystemAssigned
+ type: str
+ user_assigned_identities:
+ description:
+ - User Assigned Managed Identity
+ type: str
enable_rbac:
description:
- Enable RBAC.
@@ -247,6 +268,8 @@ options:
choices:
- loadBalancer
- userDefinedRouting
+ - managedNATGateway
+ - userAssignedNATGateway
api_server_access_profile:
description:
- Profile of API Access configuration.
@@ -590,6 +613,9 @@ state:
provisioning_state: Succeeded
service_principal_profile:
client_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ identity:
+ "type": "UserAssigned"
+ "user_assigned_identities": {}
pod_identity_profile: {
"allow_network_plugin_kubenet": false,
"user_assigned_identities": [
@@ -633,6 +659,7 @@ def create_aks_dict(aks):
kubernetes_version=aks.kubernetes_version,
tags=aks.tags,
linux_profile=create_linux_profile_dict(aks.linux_profile),
+ identity=aks.identity.as_dict() if aks.identity else None,
service_principal_profile=create_service_principal_profile_dict(
aks.service_principal_profile),
provisioning_state=aks.provisioning_state,
@@ -810,7 +837,7 @@ network_profile_spec = dict(
dns_service_ip=dict(type='str'),
docker_bridge_cidr=dict(type='str'),
load_balancer_sku=dict(type='str', choices=['standard', 'basic']),
- outbound_type=dict(type='str', default='loadBalancer', choices=['userDefinedRouting', 'loadBalancer'])
+ outbound_type=dict(type='str', default='loadBalancer', choices=['userDefinedRouting', 'loadBalancer', 'userAssignedNATGateway', 'managedNATGateway'])
)
@@ -830,6 +857,19 @@ api_server_access_profile_spec = dict(
)
+managed_identity_spec = dict(
+ type=dict(type='str', choices=['SystemAssigned', 'UserAssigned'], default='SystemAssigned'),
+ user_assigned_identities=dict(type='str'),
+)
+
+
+class dotdict(dict):
+ """dot.notation access to dictionary attributes"""
+ __getattr__ = dict.get
+ __setattr__ = dict.__setitem__
+ __delattr__ = dict.__delitem__
+
+
class AzureRMManagedCluster(AzureRMModuleBaseExt):
"""Configuration class for an Azure RM container service (AKS) resource"""
@@ -870,6 +910,14 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
type='dict',
options=service_principal_spec
),
+ identity=dict(
+ type='dict',
+ options=managed_identity_spec,
+ required_if=[
+ ('type', 'UserAssigned', [
+ 'user_assigned_identities']),
+ ]
+ ),
enable_rbac=dict(
type='bool',
default=False
@@ -930,6 +978,7 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
self.linux_profile = None
self.agent_pool_profiles = None
self.service_principal = None
+ self.identity = None
self.enable_rbac = False
self.network_profile = None
self.aad_profile = None
@@ -938,6 +987,8 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
self.node_resource_group = None
self.pod_identity_profile = None
+ mutually_exclusive = [('identity', 'service_principal')]
+
required_if = [
('state', 'present', [
'dns_prefix', 'agent_pool_profiles'])
@@ -948,7 +999,8 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
super(AzureRMManagedCluster, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True,
- required_if=required_if)
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive)
def exec_module(self, **kwargs):
"""Main module execution method"""
@@ -972,6 +1024,11 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
available_versions = self.get_all_versions()
if not response:
to_be_updated = True
+ # Default to SystemAssigned if service_principal is not specified
+ if not self.service_principal and not self.identity:
+ self.identity = dotdict({'type': 'SystemAssigned'})
+ if self.identity:
+ changed, self.identity = self.update_identity(self.identity, {})
if self.kubernetes_version not in available_versions.keys():
self.fail("Unsupported kubernetes version. Expected one of {0} but got {1}".format(available_versions.keys(), self.kubernetes_version))
else:
@@ -1118,6 +1175,14 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
else:
self.pod_identity_profile = response['pod_identity_profile']
+ # Default to SystemAssigned if service_principal is not specified
+ if not self.service_principal and not self.identity:
+ self.identity = dotdict({'type': 'SystemAssigned'})
+ if self.identity:
+ changed, self.identity = self.update_identity(self.identity, response['identity'])
+ if changed:
+ to_be_updated = True
+
if update_agentpool:
self.log("Need to update agentpool")
if not self.check_mode:
@@ -1177,12 +1242,12 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
if self.agent_pool_profiles:
agentpools = [self.create_agent_pool_profile_instance(profile) for profile in self.agent_pool_profiles]
+ # Only service_principal or identity can be specified, but default to SystemAssigned if none specified.
if self.service_principal:
service_principal_profile = self.create_service_principal_profile_instance(self.service_principal)
identity = None
else:
service_principal_profile = None
- identity = self.managedcluster_models.ManagedClusterIdentity(type='SystemAssigned')
if self.linux_profile:
linux_profile = self.create_linux_profile_instance(self.linux_profile)
@@ -1206,7 +1271,7 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
service_principal_profile=service_principal_profile,
agent_pool_profiles=agentpools,
linux_profile=linux_profile,
- identity=identity,
+ identity=self.identity,
enable_rbac=self.enable_rbac,
network_profile=self.create_network_profile_instance(self.network_profile),
aad_profile=self.create_aad_profile_instance(self.aad_profile),
@@ -1386,6 +1451,34 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
result[name] = self.managedcluster_models.ManagedClusterAddonProfile(config=config, enabled=config['enabled'])
return result
+ # AKS only supports a single UserAssigned Identity
+ def update_identity(self, param_identity, curr_identity):
+ user_identity = None
+ changed = False
+ current_managed_type = curr_identity.get('type', 'SystemAssigned')
+ current_managed_identity = curr_identity.get('user_assigned_identities', {})
+ param_managed_identity = param_identity.get('user_assigned_identities')
+
+ # If type set to SystemAssigned, and Resource has SystemAssigned, nothing to do
+ if 'SystemAssigned' in param_identity.get('type') and current_managed_type == 'SystemAssigned':
+ pass
+ # If type set to SystemAssigned, and Resource has current identity, remove UserAssigned identity
+ elif param_identity.get('type') == 'SystemAssigned':
+ changed = True
+ # If type in module args contains 'UserAssigned'
+ elif 'UserAssigned' in param_identity.get('type'):
+ if param_managed_identity not in current_managed_identity.keys():
+ user_identity = {param_managed_identity: {}}
+ changed = True
+
+ new_identity = self.managedcluster_models.ManagedClusterIdentity(
+ type=param_identity.get('type'),
+ )
+ if user_identity:
+ new_identity.user_assigned_identities = user_identity
+
+ return changed, new_identity
+
def main():
"""Main execution"""
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py
index f84362e95..aaf4f9876 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py
@@ -102,6 +102,337 @@ options:
description:
- Maximum number of pods that can run on a node.
type: int
+ kubelet_disk_type:
+ description:
+ - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
+ type: str
+ choices:
+ - OS
+ - Temporary
+ workload_runtime:
+ description:
+ - Determines the type of workload a node can run.
+ type: str
+ choices:
+ - OCIContainer
+ - WasmWasi
+ os_sku:
+ description:
+ - Specifies an OS SKU.
+ - This value must not be specified if OSType is Windows.
+ type: str
+ choices:
+ - Ubuntu
+ - CBLMariner
+ scale_down_mode:
+ description:
+ - This also affects the cluster autoscaler behavior.
+ - If not specified, it defaults to C(Delete).
+ type: str
+ default: Delete
+ choices:
+ - Delete
+ - Deallocate
+ upgrade_settings:
+ description:
+ - Settings for upgrading the agentpool.
+ type: dict
+ suboptions:
+ max_surge:
+ description:
+ - This can either be set to an integer, such as C(5), or a percentage, such as C(50%).
+ - If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade.
+ - For percentages, fractional nodes are rounded up.
+ - If not specified, the default is C(1).
+ type: str
+ power_state:
+ description:
+ - When an Agent Pool is first created it is initially C(Running).
+ - The Agent Pool can be stopped by setting this field to C(Stopped).
+ - A stopped Agent Pool stops all of its VMs and does not accrue billing charges.
+ - An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ type: dict
+ suboptions:
+ code:
+ description:
+ - Tells whether the cluster is C(Running) or C(Stopped).
+ type: str
+ choices:
+ - Running
+ - Stopped
+ enable_node_public_ip:
+ description:
+ - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses.
+ - A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops.
+ type: bool
+ scale_set_priority:
+ description:
+ - The Virtual Machine Scale Set priority.
+ - If not specified, the default is C(Regular).
+ type: str
+ choices:
+ - Spot
+ - Regular
+ node_public_ip_prefix_id:
+ description:
+ - The Azure Public IP prefix's ID.
+ type: str
+ scale_set_eviction_policy:
+ description:
+ - This cannot be specified unless the I(scale_set_priority=Spot).
+ - If not specified, the default is C(Delete).
+ type: str
+ choices:
+ - Delete
+ - Deallocate
+ spot_max_price:
+ description:
+ - Possible values are any decimal value greater than zero or -1.
+ - Indicates the willingness to pay any on-demand price.
+ type: float
+ proximity_placement_group_id:
+ description:
+ - The ID for Proximity Placement Group.
+ type: str
+ kubelet_config:
+ description:
+ - The Kubelet configuration on the agent pool nodes.
+ type: dict
+ suboptions:
+ cpu_manager_policy:
+ description:
+ - Kubernetes CPU management policies.
+ - The default is C(none).
+ type: str
+ default: none
+ choices:
+ - none
+ - static
+ cpu_cfs_quota:
+ description:
+ - The default is C(true).
+ type: bool
+ default: true
+ cpu_cfs_quota_period:
+ description:
+ - The default is C(100ms).
+ - Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix.
+ type: str
+ default: 100ms
+ image_gc_high_threshold:
+ description:
+ - To disable image garbage collection, set to C(100).
+ - The default is C(85)
+ type: int
+ default: 85
+ image_gc_low_threshold:
+ description:
+ - This cannot be set higher than imageGcHighThreshold.
+ - The default is C(80).
+ type: int
+ default: 80
+ topology_manager_policy:
+ description:
+ - Kubernetes Topology Manager policies.
+ - The default is C(none).
+ type: str
+ default: none
+ choices:
+ - none
+ - best-effort
+ - restricted
+ - single-numa-node
+ allowed_unsafe_sysctls:
+ description:
+ - Allowed list of unsafe sysctls or unsafe sysctl patterns.
+ type: list
+ elements: str
+ fail_swap_on:
+ description:
+ - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
+ type: bool
+ container_log_max_size_mb:
+ description:
+ - The maximum size of container log file before it is rotated.
+ type: int
+ container_log_max_files:
+ description:
+ - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
+ type: int
+ pod_max_pids:
+ description:
+ - The maximum number of processes per pod.
+ type: int
+ linux_os_config:
+ description:
+ - The OS configuration of Linux agent nodes.
+ type: dict
+ suboptions:
+ sysctls:
+ description:
+ - Sysctl settings for Linux agent nodes.
+ type: dict
+ suboptions:
+ net_core_somaxconn:
+ description:
+ - Sysctl setting net.core.somaxconn.
+ type: int
+ net_core_netdev_max_backlog:
+ description:
+ - Sysctl setting net.core.netdev_max_backlog.
+ type: int
+ net_core_rmem_default:
+ description:
+ - Sysctl setting net.core.rmem_default.
+ type: int
+ net_core_rmem_max:
+ description:
+ - Sysctl setting net.core.rmem_max.
+ type: int
+ net_core_wmem_default:
+ description:
+ - Sysctl setting net.core.wmem_default.
+ type: int
+ net_core_wmem_max:
+ description:
+ - Sysctl setting net.core.wmem_max.
+ type: int
+ net_core_optmem_max:
+ description:
+ - Sysctl setting net.core.optmem_max.
+ type: int
+ net_ipv4_tcp_max_syn_backlog:
+ description:
+ - Sysctl setting net.ipv4.tcp_max_syn_backlog.
+ type: int
+ net_ipv4_tcp_max_tw_buckets:
+ description:
+ - Sysctl setting net.ipv4.tcp_max_tw_buckets.
+ type: int
+ net_ipv4_tcp_fin_timeout:
+ description:
+ - Sysctl setting net.ipv4.tcp_fin_timeout.
+ type: int
+ net_ipv4_tcp_keepalive_time:
+ description:
+ - Sysctl setting net.ipv4.tcp_keepalive_time.
+ type: int
+ net_ipv4_tcp_keepalive_probes:
+ description:
+ - Sysctl setting net.ipv4.tcp_keepalive_probes.
+ type: int
+ net_ipv4_tcpkeepalive_intvl:
+ description:
+ - Sysctl setting net.ipv4.tcp_keepalive_intvl.
+ type: int
+ net_ipv4_tcp_tw_reuse:
+ description:
+ - Sysctl setting net.ipv4.tcp_tw_reuse.
+ type: bool
+ net_ipv4_ip_local_port_range:
+ description:
+ - Sysctl setting net.ipv4.ip_local_port_range.
+ type: str
+ net_ipv4_neigh_default_gc_thresh1:
+ description:
+ - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
+ type: int
+ net_ipv4_neigh_default_gc_thresh2:
+ description:
+ - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
+ type: int
+ net_ipv4_neigh_default_gc_thresh3:
+ description:
+ - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
+ type: int
+ fs_inotify_max_user_watches:
+ description:
+ - Sysctl setting fs.inotify.max_user_watches.
+ type: int
+ fs_file_max:
+ description:
+ - Sysctl setting fs.file-max.
+ type: int
+ fs_aio_max_nr:
+ description:
+ - Sysctl setting fs.aio-max-nr.
+ type: int
+ fs_nr_open:
+ description:
+ - Sysctl setting fs.nr_open.
+ type: int
+ kernel_threads_max:
+ description:
+ - Sysctl setting kernel.threads-max.
+ type: int
+ vm_max_map_count:
+ description:
+ - Sysctl setting vm.max_map_count.
+ type: int
+ vm_swappiness:
+ description:
+ - Sysctl setting vm.swappiness.
+ type: int
+ vm_vfs_cache_pressure:
+ description:
+ - Sysctl setting vm.vfs_cache_pressure.
+ type: int
+ net_netfilter_nf_conntrack_max:
+ description:
+ - sysctl setting net.netfilter.nf_conntrack_max.
+ type: int
+ net_netfilter_nf_conntrack_buckets:
+ description:
+ - Sysctl setting net.netfilter.nf_conntrack_buckets.
+ type: int
+ transparent_huge_page_enabled:
+ description:
+ - The node agent pool transparent hugepage.
+ - The default is C(always).
+ type: str
+ default: always
+ choices:
+ - always
+ - madvise
+ - never
+ transparent_huge_page_defrag:
+ description:
+ - The node agent pool transparent huge page defrag.
+ - The default is C(madvise).
+ type: str
+ default: madvise
+ choices:
+ - always
+ - defer
+ - defer+madvise
+ - madvise
+ - never
+ swap_file_size_mb:
+ description:
+ - The size in MB of a swap file that will be created on each node.
+ type: int
+ enable_encryption_at_host:
+ description:
+ - This is only supported on certain VM sizes and in certain Azure regions.
+ type: bool
+ enable_ultra_ssd:
+ description:
+ - Whether to enable UltraSSD.
+ type: bool
+ enable_fips:
+ description:
+ - Whether enable FIPS node pool.
+ type: bool
+ gpu_instance_profile:
+ description:
+ - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
+ type: str
+ choices:
+ - MIG1g
+ - MIG2g
+ - MIG3g
+ - MIG4g
+ - MIG7g
state:
description:
- State of the automation runbook. Use C(present) to create or update a automation runbook and use C(absent) to delete.
@@ -137,6 +468,31 @@ EXAMPLES = '''
availability_zones:
- 1
- 2
+- name: Create a node agent pool with custom config
+ azure_rm_aksagentpool:
+ resource_group: "{{ resource_group }}"
+ cluster_name: "min{{ rpfx }}"
+ name: default-new2
+ count: 1
+ vm_size: Standard_B2s
+ type_properties_type: VirtualMachineScaleSets
+ mode: System
+ node_labels: {"release":"stable"}
+ max_pods: 42
+ enable_auto_scaling: true
+ min_count: 1
+ max_count: 10
+ orchestrator_version: 1.23.5
+ availability_zones:
+ - 1
+ kubelet_config:
+ cpu_manager_policy: static
+ cpu_cfs_quota: true
+ fail_swap_on: false
+ linux_os_config:
+ transparent_huge_page_enabled: madvise
+ swap_file_size_mb: 1500
+ transparent_huge_page_defrag: defer+madvise
- name: Delete node agent pool
azure_rm_aksagentpool:
resource_group: "{{ resource_group }}"
@@ -313,6 +669,97 @@ aks_agent_pools:
type: str
returned: always
sample: null
+ kubelet_config:
+ description:
+ - The Kubelet configuration on the agent pool nodes.
+ type: dict
+ returned: always
+ sample: {
+ cpu_cfs_quota: true,
+ cpu_cfs_quota_period: 100ms,
+ cpu_manager_policy: static,
+ fail_swap_on: false,
+ image_gc_high_threshold: 85,
+ image_gc_low_threshold: 80,
+ topology_manager_policy: none
+ }
+ linux_os_config:
+ description:
+ - The OS configuration of Linux agent nodes.
+ type: dict
+ returned: always
+ sample: {
+ swap_file_size_mb: 1500,
+ sysctls: {},
+ transparent_huge_page_defrag: defer+madvise,
+ transparent_huge_page_enabled: madvise
+ }
+ power_state:
+ description:
+ - The agent pool's power state.
+ type: dict
+ returned: always
+ sample: {code: Running}
+ os_sku:
+ description:
+ - The node agent pool's SKU.
+ type: str
+ returned: always
+ sample: Ubuntu
+ tags:
+ description:
+ - The tags of the node agent pool.
+ type: dict
+ returned: always
+ sample: {key1: value1, key2: value2}
+ kubelet_disk_type:
+ description:
+ - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
+ type: str
+ returned: always
+ sample: OS
+ workload_runtime:
+ description:
+ - Determines the type of workload a node can run.
+ type: str
+ returned: always
+ sample: OCIContainer
+ scale_down_mode:
+ description:
+ - This also affects the cluster autoscaler behavior.
+ type: str
+ returned: always
+ sample: Delete
+ node_public_ip_prefix_id:
+ description:
+ - The Azure Public IP prefix's ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/publicIPPrefixes/pip01"
+ proximity_placement_group_id:
+ description:
+ - The ID for Proximity Placement Group.
+ type: str
+ returned: always
+ sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Compute/proximityPlacementGroups/proxi01
+ enable_encryption_at_host:
+ description:
+ - This is only supported on certain VM sizes and in certain Azure regions.
+ type: bool
+ returned: always
+ sample: false
+ enable_ultra_ssd:
+ description:
+ - Whether to enable UltraSSD.
+ type: bool
+ returned: always
+ sample: false
+ gpu_instance_profile:
+ description:
+ - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
+ type: str
+ returned: always
+ sample: MIG1g
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
@@ -387,6 +834,144 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
max_pods=dict(
type='int'
),
+ kubelet_disk_type=dict(
+ type='str', choices=['OS', 'Temporary']
+ ),
+ workload_runtime=dict(
+ type='str', choices=['OCIContainer', 'WasmWasi']
+ ),
+ os_sku=dict(
+ type='str', choices=["Ubuntu", "CBLMariner"]
+ ),
+ scale_down_mode=dict(
+ type='str',
+ choices=['Delete', 'Deallocate'],
+ default='Delete'
+ ),
+ upgrade_settings=dict(
+ type='dict',
+ options=dict(
+ max_surge=dict(
+ type='str'
+ )
+ )
+ ),
+ power_state=dict(
+ type='dict',
+ options=dict(
+ code=dict(
+ type='str',
+ choices=['Running', 'Stopped']
+ )
+ )
+ ),
+ enable_node_public_ip=dict(
+ type='bool'
+ ),
+ scale_set_priority=dict(
+ type='str',
+ choices=["Spot", "Regular"],
+ ),
+ node_public_ip_prefix_id=dict(
+ type='str'
+ ),
+ scale_set_eviction_policy=dict(
+ type='str',
+ choices=['Delete', 'Deallocate'],
+ ),
+ spot_max_price=dict(
+ type='float'
+ ),
+ proximity_placement_group_id=dict(
+ type='str'
+ ),
+ kubelet_config=dict(
+ type='dict',
+ options=dict(
+ cpu_manager_policy=dict(type='str', choices=['none', 'static'], default='none'),
+ cpu_cfs_quota=dict(type='bool', default='true'),
+ cpu_cfs_quota_period=dict(type='str', default='100ms'),
+ image_gc_high_threshold=dict(type='int', default=85),
+ image_gc_low_threshold=dict(type='int', default=80),
+ topology_manager_policy=dict(
+ type='str',
+ default='none',
+ choices=['none', 'best-effort', 'restricted', 'single-numa-node']
+ ),
+ allowed_unsafe_sysctls=dict(
+ type='list',
+ elements='str'
+ ),
+ fail_swap_on=dict(type='bool'),
+ container_log_max_size_mb=dict(type='int'),
+ container_log_max_files=dict(type='int'),
+ pod_max_pids=dict(type='int')
+ )
+ ),
+ linux_os_config=dict(
+ type='dict',
+ options=dict(
+ sysctls=dict(
+ type='dict',
+ options=dict(
+ net_core_somaxconn=dict(type='int'),
+ net_core_netdev_max_backlog=dict(type='int'),
+ net_core_rmem_default=dict(type='int'),
+ net_core_rmem_max=dict(type='int'),
+ net_core_wmem_default=dict(type='int'),
+ net_core_wmem_max=dict(type='int'),
+ net_core_optmem_max=dict(type='int'),
+ net_ipv4_tcp_max_syn_backlog=dict(type='int'),
+ net_ipv4_tcp_max_tw_buckets=dict(type='int'),
+ net_ipv4_tcp_fin_timeout=dict(type='int'),
+ net_ipv4_tcp_keepalive_time=dict(type='int'),
+ net_ipv4_tcp_keepalive_probes=dict(type='int'),
+ net_ipv4_tcpkeepalive_intvl=dict(type='int'),
+ net_ipv4_tcp_tw_reuse=dict(type='bool'),
+ net_ipv4_ip_local_port_range=dict(type='str'),
+ net_ipv4_neigh_default_gc_thresh1=dict(type='int'),
+ net_ipv4_neigh_default_gc_thresh2=dict(type='int'),
+ net_ipv4_neigh_default_gc_thresh3=dict(type='int'),
+ net_netfilter_nf_conntrack_max=dict(type='int'),
+ net_netfilter_nf_conntrack_buckets=dict(type='int'),
+ fs_inotify_max_user_watches=dict(type='int'),
+ fs_file_max=dict(type='int'),
+ fs_aio_max_nr=dict(type='int'),
+ fs_nr_open=dict(type='int'),
+ kernel_threads_max=dict(type='int'),
+ vm_max_map_count=dict(type='int'),
+ vm_swappiness=dict(type='int'),
+ vm_vfs_cache_pressure=dict(type='int')
+ )
+ ),
+ transparent_huge_page_enabled=dict(
+ type='str',
+ choices=['always', 'madvise', 'never'],
+ default='always'
+ ),
+ swap_file_size_mb=dict(
+ type='int'
+ ),
+ transparent_huge_page_defrag=dict(
+ type='str',
+ default='madvise',
+ choices=['always', 'defer', 'defer+madvise', 'madvise', 'never']
+ )
+ )
+ ),
+ enable_encryption_at_host=dict(
+ type='bool'
+ ),
+ enable_ultra_ssd=dict(
+ type='bool'
+ ),
+ enable_fips=dict(
+ type='bool'
+ ),
+ gpu_instance_profile=dict(
+ type='str',
+ choices=["MIG1g", "MIG2g", "MIG3g", "MIG4g", "MIG7g"]
+ ),
state=dict(
type='str',
choices=['present', 'absent'],
@@ -413,13 +998,32 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
self.node_labels = None
self.min_count = None
self.max_pods = None
+ self.tags = None
+ self.kubelet_disk_type = None
+ self.workload_runtime = None
+ self.os_sku = None
+ self.scale_down_mode = None
+ self.upgrade_settings = None
+ self.power_state = None
+ self.enable_node_public_ip = None
+ self.scale_set_priority = None
+ self.node_public_ip_prefix_id = None
+ self.scale_set_eviction_policy = None
+ self.spot_max_price = None
+ self.proximity_placement_group_id = None
+ self.kubelet_config = None
+ self.linux_os_config = None
+ self.enable_encryption_at_host = None
+ self.enable_ultra_ssd = None
+ self.enable_fips = None
+ self.gpu_instance_profile = None
self.body = dict()
super(AzureRMAksAgentPool, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True)
def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec):
+ for key in list(self.module_arg_spec) + ['tags']:
setattr(self, key, kwargs[key])
if key not in ['resource_group', 'cluster_name', 'name', 'state']:
self.body[key] = kwargs[key]
@@ -430,8 +1034,16 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
if self.state == 'present':
if agent_pool:
+ update_tags, self.body['tags'] = self.update_tags(agent_pool.get('tags'))
for key in self.body.keys():
- if self.body[key] is not None and self.body[key] != agent_pool[key]:
+ if key == 'tags':
+ if update_tags:
+ changed = True
+ elif self.body[key] is not None and isinstance(self.body[key], dict):
+ for item in self.body[key].keys():
+ if self.body[key][item] is not None and self.body[key][item] != agent_pool[key].get(item):
+ changed = True
+ elif self.body[key] is not None and self.body[key] != agent_pool[key] and key not in ['scale_set_priority', 'spot_max_price']:
changed = True
else:
self.body[key] = agent_pool[key]
@@ -509,14 +1121,54 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
spot_max_price=agent_pool.spot_max_price,
node_labels=agent_pool.node_labels,
node_taints=agent_pool.node_taints,
+ tags=agent_pool.tags,
+ kubelet_disk_type=agent_pool.kubelet_disk_type,
+ workload_runtime=agent_pool.workload_runtime,
+ os_sku=agent_pool.os_sku,
+ scale_down_mode=agent_pool.scale_down_mode,
+ power_state=dict(),
+ node_public_ip_prefix_id=agent_pool.node_public_ip_prefix_id,
+ proximity_placement_group_id=agent_pool.proximity_placement_group_id,
+ kubelet_config=dict(),
+ linux_os_config=dict(),
+ enable_encryption_at_host=agent_pool.enable_encryption_at_host,
+ enable_ultra_ssd=agent_pool.enable_ultra_ssd,
+ enable_fips=agent_pool.enable_fips,
+ gpu_instance_profile=agent_pool.gpu_instance_profile
)
if agent_pool.upgrade_settings is not None:
agent_pool_dict['upgrade_settings']['max_surge'] = agent_pool.upgrade_settings.max_surge
+ else:
+ agent_pool_dict['upgrade_settings'] = None
if agent_pool.availability_zones is not None:
for key in agent_pool.availability_zones:
agent_pool_dict['availability_zones'].append(int(key))
+ else:
+ agent_pool_dict['availability_zones'] = None
+
+ if agent_pool.kubelet_config is not None:
+ agent_pool_dict['kubelet_config'] = agent_pool.kubelet_config.as_dict()
+ else:
+ agent_pool_dict['kubelet_config'] = None
+
+ if agent_pool.power_state is not None:
+ agent_pool_dict['power_state']['code'] = agent_pool.power_state.code
+ else:
+ agent_pool_dict['power_state'] = None
+
+ if agent_pool.linux_os_config is not None:
+ agent_pool_dict['linux_os_config']['transparent_huge_page_enabled'] = agent_pool.linux_os_config.transparent_huge_page_enabled
+ agent_pool_dict['linux_os_config']['transparent_huge_page_defrag'] = agent_pool.linux_os_config.transparent_huge_page_defrag
+ agent_pool_dict['linux_os_config']['swap_file_size_mb'] = agent_pool.linux_os_config.swap_file_size_mb
+ agent_pool_dict['linux_os_config']['sysctls'] = dict()
+ if agent_pool.linux_os_config.sysctls is not None:
+ agent_pool_dict['linux_os_config']['sysctls'] = agent_pool.linux_os_config.sysctls.as_dict()
+ else:
+ agent_pool_dict['linux_os_config']['sysctls'] = None
+ else:
+ agent_pool_dict['linux_os_config'] = None
return agent_pool_dict
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py
index 59f3b696d..99ba08254 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py
@@ -168,6 +168,12 @@ aks_agent_pools:
type: str
returned: always
sample: Linux
+ os_sku:
+ description:
+ - OS SKU to be used to specify os type.
+ type: str
+ returned: always
+ sample: Windows2022
provisioning_state:
description:
- The current deployment or provisioning state, which only appears in the response.
@@ -222,6 +228,91 @@ aks_agent_pools:
type: str
returned: always
sample: null
+ kubelet_config:
+ description:
+ - The Kubelet configuration on the agent pool nodes.
+ type: dict
+ returned: always
+ sample: {
+ cpu_cfs_quota: true,
+ cpu_cfs_quota_period: 100ms,
+ cpu_manager_policy: static,
+ fail_swap_on: false,
+ image_gc_high_threshold: 85,
+ image_gc_low_threshold: 80,
+ topology_manager_policy: none
+ }
+ linux_os_config:
+ description:
+ - The OS configuration of Linux agent nodes.
+ type: dict
+ returned: always
+ sample: {
+ swap_file_size_mb: 1500,
+ sysctls: {},
+ transparent_huge_page_defrag: defer+madvise,
+ transparent_huge_page_enabled: madvise
+ }
+ power_state:
+ description:
+ - The agent pool's power state.
+ type: dict
+ returned: always
+ sample: {code: Running}
+ tags:
+ description:
+ - The tags of the node agent pool.
+ type: dict
+ returned: always
+ sample: {key1: value1, key2: value2}
+ kubelet_disk_type:
+ description:
+ - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
+ type: str
+ returned: always
+ sample: OS
+ workload_runtime:
+ description:
+ - Determines the type of workload a node can run.
+ type: str
+ returned: always
+ sample: OCIContainer
+ scale_down_mode:
+ description:
+ - This also affects the cluster autoscaler behavior.
+ type: str
+ returned: always
+ sample: Delete
+ node_public_ip_prefix_id:
+ description:
+ - The Azure Public IP prefix's ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/publicIPPrefixes/pip01"
+ proximity_placement_group_id:
+ description:
+ - The ID for Proximity Placement Group.
+ type: str
+ returned: always
+ sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Compute/proximityPlacementGroups/proxi01
+ enable_encryption_at_host:
+ description:
+ - This is only supported on certain VM sizes and in certain Azure regions.
+ type: bool
+ returned: always
+ sample: false
+ enable_ultra_ssd:
+ description:
+ - Whether to enable UltraSSD.
+ type: bool
+ returned: always
+ sample: false
+ gpu_instance_profile:
+ description:
+ - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
+ type: str
+ returned: always
+ sample: MIG1g
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
@@ -301,6 +392,7 @@ class AzureRMAgentPoolInfo(AzureRMModuleBase):
vnet_subnet_id=agent_pool.vnet_subnet_id,
max_pods=agent_pool.max_pods,
os_type=agent_pool.os_type,
+ os_sku=agent_pool.os_sku,
max_count=agent_pool.max_count,
min_count=agent_pool.min_count,
enable_auto_scaling=agent_pool.enable_auto_scaling,
@@ -317,14 +409,53 @@ class AzureRMAgentPoolInfo(AzureRMModuleBase):
spot_max_price=agent_pool.spot_max_price,
node_labels=agent_pool.node_labels,
node_taints=agent_pool.node_taints,
+ tags=agent_pool.tags,
+ kubelet_disk_type=agent_pool.kubelet_disk_type,
+ workload_runtime=agent_pool.workload_runtime,
+ scale_down_mode=agent_pool.scale_down_mode,
+ power_state=dict(),
+ node_public_ip_prefix_id=agent_pool.node_public_ip_prefix_id,
+ proximity_placement_group_id=agent_pool.proximity_placement_group_id,
+ kubelet_config=dict(),
+ linux_os_config=dict(),
+ enable_encryption_at_host=agent_pool.enable_encryption_at_host,
+ enable_ultra_ssd=agent_pool.enable_ultra_ssd,
+ enable_fips=agent_pool.enable_fips,
+ gpu_instance_profile=agent_pool.gpu_instance_profile
)
if agent_pool.upgrade_settings is not None:
agent_pool_dict['upgrade_settings']['max_surge'] = agent_pool.upgrade_settings.max_surge
+ else:
+ agent_pool_dict['upgrade_settings'] = None
if agent_pool.availability_zones is not None:
for key in agent_pool.availability_zones:
agent_pool_dict['availability_zones'].append(int(key))
+ else:
+ agent_pool_dict['availability_zones'] = None
+
+ if agent_pool.kubelet_config is not None:
+ agent_pool_dict['kubelet_config'] = agent_pool.kubelet_config.as_dict()
+ else:
+ agent_pool_dict['kubelet_config'] = None
+
+ if agent_pool.linux_os_config is not None:
+ agent_pool_dict['linux_os_config']['transparent_huge_page_enabled'] = agent_pool.linux_os_config.transparent_huge_page_enabled
+ agent_pool_dict['linux_os_config']['transparent_huge_page_defrag'] = agent_pool.linux_os_config.transparent_huge_page_defrag
+ agent_pool_dict['linux_os_config']['swap_file_size_mb'] = agent_pool.linux_os_config.swap_file_size_mb
+ agent_pool_dict['linux_os_config']['sysctls'] = dict()
+ if agent_pool.linux_os_config.sysctls is not None:
+ agent_pool_dict['linux_os_config']['sysctls'] = agent_pool.linux_os_config.sysctls.as_dict()
+ else:
+ agent_pool_dict['linux_os_config']['sysctls'] = None
+ else:
+ agent_pool_dict['linux_os_config'] = None
+
+ if agent_pool.power_state is not None:
+ agent_pool_dict['power_state']['code'] = agent_pool.power_state.code
+ else:
+ agent_pool_dict['power_state'] = None
return agent_pool_dict
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py
index 863839329..eb6c297d4 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py
@@ -379,14 +379,8 @@ class BackupAzureVM(AzureRMModuleBaseExt):
self.log('Error attempting to backup azure vm.')
self.fail(
'Error while taking on-demand backup: {0}'.format(str(e)))
-
- if hasattr(response, 'body'):
- response = json.loads(response.body())
- elif hasattr(response, 'context'):
- response = response.context['deserialized_data']
- else:
- self.fail("Create or Updating fail, no match message return, return info as {0}".format(response))
-
+ # The return value is None, which only triggers the backup. Backups also take some time to complete.
+ response = dict(msg='The backup has been successfully triggered, please monitor the backup process on the Backup Jobs page')
return response
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py
index a8852c583..01dda868e 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py
@@ -395,31 +395,45 @@ class AzureRMSearch(AzureRMModuleBase):
self.results['changed'] = True
search_update_model.identity = self.search_client.services.models.Identity(type=self.identity)
+ network_update = False
if self.network_rule_set:
for rule in self.network_rule_set:
if len(self.network_rule_set) != len(self.account_dict.get('network_rule_set')) or rule not in self.account_dict.get('network_rule_set'):
self.results['changed'] = True
+ network_update = True
self.firewall_list.append(self.search_client.services.models.IpRule(value=rule))
search_update_model.network_rule_set = dict(ip_rules=self.firewall_list)
+ elif not network_update:
+ firewall_list = []
+ for rule in self.account_dict.get('network_rule_set', []):
+ firewall_list.append(self.search_client.services.models.IpRule(value=rule))
+ search_update_model.network_rule_set = dict(ip_rules=firewall_list)
if self.partition_count and self.account_dict.get('partition_count') != self.partition_count:
self.results['changed'] = True
search_update_model.partition_count = self.partition_count
+ else:
+ search_update_model.partition_count = self.account_dict.get('partition_count')
if self.public_network_access and self.account_dict.get('public_network_access').lower() != self.public_network_access.lower():
self.results['changed'] = True
search_update_model.public_network_access = self.public_network_access
+ else:
+ search_update_model.public_network_access = self.account_dict.get('public_network_access')
if self.replica_count and self.account_dict.get('replica_count') != self.replica_count:
self.results['changed'] = True
search_update_model.replica_count = self.replica_count
+ else:
+ search_update_model.replica_count = self.account_dict.get('replica_count')
if self.sku and self.account_dict.get('sku') != self.sku:
self.fail("Updating sku of an existing search service is not allowed.")
- if self.tags and self.account_dict.get('tags') != self.tags:
+ update_tags, new_tags = self.update_tags(self.account_dict.get('tags'))
+ if update_tags:
self.results['changed'] = True
- search_update_model.tags = self.tags
+ search_update_model.tags = new_tags
self.log('Updating search {0}'.format(self.name))
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py
deleted file mode 100644
index b46907339..000000000
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py
+++ /dev/null
@@ -1,807 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: azure_rm_datalakestore
-version_added: "1.4.0"
-short_description: Manage Azure data lake store
-description:
- - Create, update or delete a data lake store.
-options:
- default_group:
- description:
- - The default owner group for all new folders and files created in the Data Lake Store account.
- type: str
- encryption_config:
- description:
- - The Key Vault encryption configuration.
- type: dict
- suboptions:
- type:
- description:
- - The type of encryption configuration being used.
- choices:
- - UserManaged
- - ServiceManaged
- required: true
- type: str
- key_vault_meta_info:
- description:
- - The Key Vault information for connecting to user managed encryption keys.
- type: dict
- suboptions:
- key_vault_resource_id:
- description:
- - The resource identifier for the user managed Key Vault being used to encrypt.
- type: str
- required: true
- encryption_key_name:
- description:
- - The name of the user managed encryption key.
- type: str
- required: true
- encryption_key_version:
- description:
- - The version of the user managed encryption key.
- type: str
- required: true
- encryption_state:
- description:
- - The current state of encryption for this Data Lake Store account.
- choices:
- - Enabled
- - Disabled
- type: str
- firewall_allow_azure_ips:
- description:
- - The current state of allowing or disallowing IPs originating within Azure through the firewall.
- - If the firewall is disabled, this is not enforced.
- choices:
- - Enabled
- - Disabled
- type: str
- firewall_rules:
- description:
- - The list of firewall rules associated with this Data Lake Store account.
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The unique name of the firewall rule to create.
- type: str
- required: true
- start_ip_address:
- description:
- - The start IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- required: true
- end_ip_address:
- description:
- - The end IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- required: true
- firewall_state:
- description:
- - The current state of the IP address firewall for this Data Lake Store account.
- choices:
- - Enabled
- - Disabled
- type: str
- identity:
- description:
- - The Key Vault encryption identity, if any.
- choices:
- - SystemAssigned
- type: str
- location:
- description:
- - The resource location.
- type: str
- name:
- description:
- - The name of the Data Lake Store account.
- type: str
- required: true
- new_tier:
- description:
- - The commitment tier to use for next month.
- choices:
- - Consumption
- - Commitment_1TB
- - Commitment_10TB
- - Commitment_100TB
- - Commitment_500TB
- - Commitment_1PB
- - Commitment_5PB
- type: str
- resource_group:
- description:
- - The name of the Azure resource group to use.
- required: true
- type: str
- aliases:
- - resource_group_name
- state:
- description:
- - State of the data lake store. Use C(present) to create or update a data lake store and use C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- type: str
- virtual_network_rules:
- description:
- - The list of virtual network rules associated with this Data Lake Store account.
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The unique name of the virtual network rule to create.
- type: str
- required: true
- subnet_id:
- description:
- - The resource identifier for the subnet.
- type: str
- required: true
-
-extends_documentation_fragment:
- - azure.azcollection.azure
- - azure.azcollection.azure_tags
-
-author:
- - David Duque Hernández (@next-davidduquehernandez)
-'''
-
-EXAMPLES = '''
-- name: Create Azure Data Lake Store
- azure_rm_datalakestore:
- resource_group: myResourceGroup
- name: myDataLakeStore
-'''
-
-RETURN = '''
-state:
- description:
- - Facts for Azure Data Lake Store created/updated.
- returned: always
- type: complex
- contains:
- account_id:
- description:
- - The unique identifier associated with this Data Lake Store account.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- creation_time:
- description:
- - The account creation time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- current_tier:
- description:
- - The commitment tier in use for the current month.
- type: str
- returned: always
- sample: Consumption
- default_group:
- description:
- - The default owner group for all new folders and files created in the Data Lake Store account.
- type: str
- sample: null
- encryption_config:
- description:
- - The Key Vault encryption configuration.
- type: complex
- contains:
- type:
- description:
- - The type of encryption configuration being used.
- type: str
- returned: always
- sample: ServiceManaged
- key_vault_meta_info:
- description:
- - The Key Vault information for connecting to user managed encryption keys.
- type: complex
- contains:
- key_vault_resource_id:
- description:
- - The resource identifier for the user managed Key Vault being used to encrypt.
- type: str
- returned: always
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/tstkv
- encryption_key_name:
- description:
- - The name of the user managed encryption key.
- type: str
- returned: always
- sample: KeyName
- encryption_key_version:
- description:
- - The version of the user managed encryption key.
- type: str
- returned: always
- sample: 86a1e3b7406f45afa0d54e21eff47e39
- encryption_provisioning_state:
- description:
- - The current state of encryption provisioning for this Data Lake Store account.
- type: str
- sample: Succeeded
- encryption_state:
- description:
- - The current state of encryption for this Data Lake Store account.
- type: str
- returned: always
- sample: Enabled
- endpoint:
- description:
- - The full CName endpoint for this account.
- returned: always
- type: str
- sample: testaccount.azuredatalakestore.net
- firewall_allow_azure_ips:
- description:
- - The current state of allowing or disallowing IPs originating within Azure through the firewall.
- - If the firewall is disabled, this is not enforced.
- type: str
- returned: always
- sample: Disabled
- firewall_rules:
- description:
- - The list of firewall rules associated with this Data Lake Store account.
- type: list
- returned: always
- contains:
- name:
- description:
- - The resource name.
- type: str
- returned: always
- sample: Example Name
- start_ip_address:
- description:
- - The start IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.1
- end_ip_address:
- description:
- - The end IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.254
- firewall_state:
- description:
- - The current state of the IP address firewall for this Data Lake Store account.
- type: str
- returned: always
- sample: Enabled
- id:
- description:
- - The resource identifier.
- returned: always
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.DataLakeStore/accounts/testaccount
- identity:
- description:
- - The Key Vault encryption identity, if any.
- type: complex
- contains:
- type:
- description:
- - The type of encryption being used.
- type: str
- sample: SystemAssigned
- principal_id:
- description:
- - The principal identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- tenant_id:
- description:
- - The tenant identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- last_modified_time:
- description:
- - The account last modified time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: westeurope
- name:
- description:
- - The resource name.
- returned: always
- type: str
- sample: testaccount
- new_tier:
- description:
- - The commitment tier to use for next month.
- type: str
- returned: always
- sample: Consumption
- provisioning_state:
- description:
- - The provisioning status of the Data Lake Store account.
- returned: always
- type: str
- sample: Succeeded
- state:
- description:
- - The state of the Data Lake Store account.
- returned: always
- type: str
- sample: Active
- tags:
- description:
- - The resource tags.
- returned: always
- type: dict
- sample: { "tag1":"abc" }
- trusted_id_providers:
- description:
- - The current state of the trusted identity provider feature for this Data Lake Store account.
- type: list
- returned: always
- contains:
- id:
- description:
- - The resource identifier.
- type: str
- name:
- description:
- - The resource name.
- type: str
- type:
- description:
- - The resource type.
- type: str
- id_provider:
- description:
- - The URL of this trusted identity provider.
- type: str
- trusted_id_provider_state:
- description:
- - The list of trusted identity providers associated with this Data Lake Store account.
- type: str
- returned: always
- sample: Enabled
- type:
- description:
- - The resource type.
- returned: always
- type: str
- sample: Microsoft.DataLakeStore/accounts
- virtual_network_rules:
- description:
- - The list of virtual network rules associated with this Data Lake Store account.
- type: list
- returned: always
- contains:
- name:
- description:
- - The resource name.
- type: str
- sample: Rule Name
- subnet_id:
- description:
- - The resource identifier for the subnet.
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default
-
-'''
-
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.core.exceptions import ResourceNotFoundError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-firewall_rules_item = dict(
- name=dict(type='str', required=True),
- start_ip_address=dict(type='str', required=True),
- end_ip_address=dict(type='str', required=True)
-)
-
-virtual_network_rules_item = dict(
- name=dict(type='str', required=True),
- subnet_id=dict(type='str', required=True)
-)
-
-
-class AzureRMDatalakeStore(AzureRMModuleBase):
- def __init__(self):
-
- self.module_arg_spec = dict(
- default_group=dict(type='str'),
- encryption_config=dict(
- type='dict',
- options=dict(
- type=dict(type='str', choices=['UserManaged', 'ServiceManaged'], required=True),
- key_vault_meta_info=dict(
- type='dict',
- no_log=True,
- options=dict(
- key_vault_resource_id=dict(type='str', required=True),
- encryption_key_name=dict(type='str', required=True),
- encryption_key_version=dict(type='str', no_log=True, required=True)
- )
- ),
- )
- ),
- encryption_state=dict(type='str', choices=['Enabled', 'Disabled']),
- firewall_allow_azure_ips=dict(type='str', choices=['Enabled', 'Disabled']),
- firewall_rules=dict(
- type='list',
- elements='dict',
- options=firewall_rules_item
- ),
- firewall_state=dict(type='str', choices=['Enabled', 'Disabled']),
- identity=dict(type='str', choices=['SystemAssigned']),
- location=dict(type='str'),
- name=dict(type='str', required=True),
- new_tier=dict(type='str', choices=['Consumption', 'Commitment_1TB', 'Commitment_10TB', 'Commitment_100TB',
- 'Commitment_500TB', 'Commitment_1PB', 'Commitment_5PB']),
- resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- virtual_network_rules=dict(
- type='list',
- elements='dict',
- options=virtual_network_rules_item
- ),
- )
-
- self.state = None
- self.name = None
- self.resource_group = None
- self.location = None
- self.new_tier = None
- self.default_group = None
- self.encryption_config = dict()
- self.encryption_config_model = None
- self.encryption_state = None
- self.firewall_state = None
- self.firewall_allow_azure_ips = None
- self.firewall_rules = None
- self.firewall_rules_model = None
- self.virtual_network_rules = None
- self.virtual_network_rules_model = None
- self.identity = None
- self.identity_model = None
-
- self.results = dict(changed=False)
- self.account_dict = None
-
- super(AzureRMDatalakeStore, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=False,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- self.module.deprecate("The azure_rm_datalakestore.py will deprecated. Azure Data Lake Storage Gen1 retired on February 29,2024", version=(2.3, ))
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- if self.encryption_config:
- key_vault_meta_info_model = None
- if self.encryption_config.get('key_vault_meta_info'):
- key_vault_meta_info_model = self.datalake_store_models.KeyVaultMetaInfo(
- key_vault_resource_id=self.encryption_config.get('key_vault_meta_info').get('key_vault_resource_id'),
- encryption_key_name=self.encryption_config.get('key_vault_meta_info').get('encryption_key_name'),
- encryption_key_version=self.encryption_config.get('key_vault_meta_info').get('encryption_key_version')
- )
- self.encryption_config_model = self.datalake_store_models.EncryptionConfig(type=self.encryption_config.get('type'),
- key_vault_meta_info=key_vault_meta_info_model)
-
- if self.identity is not None:
- self.identity_model = self.datalake_store_models.EncryptionIdentity(
- type=self.identity
- )
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- self.account_dict = self.get_datalake_store()
-
- if self.account_dict is not None:
- self.results['state'] = self.account_dict
- else:
- self.results['state'] = dict()
-
- if self.state == 'present':
- if not self.account_dict:
- self.results['state'] = self.create_datalake_store()
- else:
- self.results['state'] = self.update_datalake_store()
- else:
- self.delete_datalake_store()
- self.results['state'] = dict(state='Deleted')
-
- return self.results
-
- def check_name_availability(self):
- self.log('Checking name availability for {0}'.format(self.name))
- try:
- response = self.datalake_store_client.accounts.check_name_availability(self.location, parameters={'name': self.name})
- except Exception as e:
- self.log('Error attempting to validate name.')
- self.fail("Error checking name availability: {0}".format(str(e)))
- if not response.name_available:
- self.log('Error name not available.')
- self.fail("{0} - {1}".format(response.message, response.reason))
-
- def create_datalake_store(self):
- self.log("Creating datalake store {0}".format(self.name))
-
- if not self.location:
- self.fail('Parameter error: location required when creating a datalake store account.')
-
- self.check_name_availability()
- self.results['changed'] = True
-
- if self.check_mode:
- account_dict = dict(
- name=self.name,
- resource_group=self.resource_group,
- location=self.location
- )
- return account_dict
-
- if self.firewall_rules is not None:
- self.firewall_rules_model = list()
- for rule in self.firewall_rules:
- rule_model = self.datalake_store_models.CreateFirewallRuleWithAccountParameters(
- name=rule.get('name'),
- start_ip_address=rule.get('start_ip_address'),
- end_ip_address=rule.get('end_ip_address'))
- self.firewall_rules_model.append(rule_model)
-
- if self.virtual_network_rules is not None:
- self.virtual_network_rules_model = list()
- for vnet_rule in self.virtual_network_rules:
- vnet_rule_model = self.datalake_store_models.CreateVirtualNetworkRuleWithAccountParameters(
- name=vnet_rule.get('name'),
- subnet_id=vnet_rule.get('subnet_id'))
- self.virtual_network_rules_model.append(vnet_rule_model)
-
- parameters = self.datalake_store_models.CreateDataLakeStoreAccountParameters(
- default_group=self.default_group,
- encryption_config=self.encryption_config_model,
- encryption_state=self.encryption_state,
- firewall_allow_azure_ips=self.firewall_allow_azure_ips,
- firewall_rules=self.firewall_rules_model,
- firewall_state=self.firewall_state,
- identity=self.identity_model,
- location=self.location,
- new_tier=self.new_tier,
- tags=self.tags,
- virtual_network_rules=self.virtual_network_rules_model
- )
-
- self.log(str(parameters))
- try:
- poller = self.datalake_store_client.accounts.begin_create(self.resource_group, self.name, parameters)
- self.get_poller_result(poller)
- except Exception as e:
- self.log('Error creating datalake store.')
- self.fail("Failed to create datalake store: {0}".format(str(e)))
-
- return self.get_datalake_store()
-
- def update_datalake_store(self):
- self.log("Updating datalake store {0}".format(self.name))
-
- parameters = self.datalake_store_models.UpdateDataLakeStoreAccountParameters()
-
- if self.tags:
- update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
- if update_tags:
- self.results['changed'] = True
- parameters.tags = self.account_dict['tags']
-
- if self.new_tier and self.account_dict.get('new_tier') != self.new_tier:
- self.results['changed'] = True
- parameters.new_tier = self.new_tier
-
- if self.default_group and self.account_dict.get('default_group') != self.default_group:
- self.results['changed'] = True
- parameters.default_group = self.default_group
-
- if self.encryption_state and self.account_dict.get('encryption_state') != self.encryption_state:
- self.fail("Encryption type cannot be updated.")
-
- if self.encryption_config:
- if (
- self.encryption_config.get('type') == 'UserManaged'
- and self.encryption_config.get('key_vault_meta_info') != self.account_dict.get('encryption_config').get('key_vault_meta_info')
- ):
- self.results['changed'] = True
- key_vault_meta_info_model = self.datalake_store_models.UpdateKeyVaultMetaInfo(
- encryption_key_version=self.encryption_config.get('key_vault_meta_info').get('encryption_key_version')
- )
- encryption_config_model = self.datalake_store_models.UpdateEncryptionConfig = key_vault_meta_info_model
- parameters.encryption_config = encryption_config_model
-
- if self.firewall_state and self.account_dict.get('firewall_state') != self.firewall_state:
- self.results['changed'] = True
- parameters.firewall_state = self.firewall_state
-
- if self.firewall_allow_azure_ips and self.account_dict.get('firewall_allow_azure_ips') != self.firewall_allow_azure_ips:
- self.results['changed'] = True
- parameters.firewall_allow_azure_ips = self.firewall_allow_azure_ips
-
- if self.firewall_rules is not None:
- if not self.compare_lists(self.firewall_rules, self.account_dict.get('firewall_rules')):
- self.firewall_rules_model = list()
- for rule in self.firewall_rules:
- rule_model = self.datalake_store_models.UpdateFirewallRuleWithAccountParameters(
- name=rule.get('name'),
- start_ip_address=rule.get('start_ip_address'),
- end_ip_address=rule.get('end_ip_address'))
- self.firewall_rules_model.append(rule_model)
- self.results['changed'] = True
- parameters.firewall_rules = self.firewall_rules_model
-
- if self.virtual_network_rules is not None:
- if not self.compare_lists(self.virtual_network_rules, self.account_dict.get('virtual_network_rules')):
- self.virtual_network_rules_model = list()
- for vnet_rule in self.virtual_network_rules:
- vnet_rule_model = self.datalake_store_models.UpdateVirtualNetworkRuleWithAccountParameters(
- name=vnet_rule.get('name'),
- subnet_id=vnet_rule.get('subnet_id'))
- self.virtual_network_rules_model.append(vnet_rule_model)
- self.results['changed'] = True
- parameters.virtual_network_rules = self.virtual_network_rules_model
-
- if self.identity_model is not None:
- self.results['changed'] = True
- parameters.identity = self.identity_model
-
- self.log(str(parameters))
- if self.results['changed']:
- try:
- poller = self.datalake_store_client.accounts.begin_update(self.resource_group, self.name, parameters)
- self.get_poller_result(poller)
- except Exception as e:
- self.log('Error creating datalake store.')
- self.fail("Failed to create datalake store: {0}".format(str(e)))
-
- return self.get_datalake_store()
-
- def delete_datalake_store(self):
- self.log('Delete datalake store {0}'.format(self.name))
-
- self.results['changed'] = True if self.account_dict is not None else False
- if not self.check_mode and self.account_dict is not None:
- try:
- status = self.datalake_store_client.accounts.begin_delete(self.resource_group, self.name)
- self.log("delete status: ")
- self.log(str(status))
- except Exception as e:
- self.fail("Failed to delete datalake store: {0}".format(str(e)))
-
- return True
-
- def get_datalake_store(self):
- self.log('Get properties for datalake store {0}'.format(self.name))
- datalake_store_obj = None
- account_dict = None
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.get(self.resource_group, self.name)
- except ResourceNotFoundError:
- pass
-
- if datalake_store_obj:
- account_dict = self.account_obj_to_dict(datalake_store_obj)
-
- return account_dict
-
- def account_obj_to_dict(self, datalake_store_obj):
- account_dict = dict(
- account_id=datalake_store_obj.account_id,
- creation_time=datalake_store_obj.creation_time,
- current_tier=datalake_store_obj.current_tier,
- default_group=datalake_store_obj.default_group,
- encryption_config=None,
- encryption_provisioning_state=datalake_store_obj.encryption_provisioning_state,
- encryption_state=datalake_store_obj.encryption_state,
- endpoint=datalake_store_obj.endpoint,
- firewall_allow_azure_ips=datalake_store_obj.firewall_allow_azure_ips,
- firewall_rules=None,
- firewall_state=datalake_store_obj.firewall_state,
- id=datalake_store_obj.id,
- identity=None,
- last_modified_time=datalake_store_obj.last_modified_time,
- location=datalake_store_obj.location,
- name=datalake_store_obj.name,
- new_tier=datalake_store_obj.new_tier,
- provisioning_state=datalake_store_obj.provisioning_state,
- state=datalake_store_obj.state,
- tags=datalake_store_obj.tags,
- trusted_id_providers=datalake_store_obj.trusted_id_providers,
- trusted_id_provider_state=datalake_store_obj.trusted_id_provider_state,
- type=datalake_store_obj.type,
- virtual_network_rules=None
- )
-
- account_dict['firewall_rules'] = list()
- if datalake_store_obj.firewall_rules:
- for rule in datalake_store_obj.firewall_rules:
- rule_item = dict(
- name=rule.name,
- start_ip_address=rule.start_ip_address,
- end_ip_address=rule.end_ip_address
- )
- account_dict['firewall_rules'].append(rule_item)
-
- account_dict['virtual_network_rules'] = list()
- if datalake_store_obj.virtual_network_rules:
- for vnet_rule in datalake_store_obj.virtual_network_rules:
- vnet_rule_item = dict(
- name=vnet_rule.name,
- subnet_id=vnet_rule.subnet_id
- )
- account_dict['virtual_network_rules'].append(vnet_rule_item)
-
- if datalake_store_obj.identity:
- account_dict['identity'] = dict(
- type=datalake_store_obj.identity.type,
- principal_id=datalake_store_obj.identity.principal_id,
- tenant_id=datalake_store_obj.identity.tenant_id
- )
-
- if datalake_store_obj.encryption_config:
- if datalake_store_obj.encryption_config.key_vault_meta_info:
- account_dict['encryption_config'] = dict(
- key_vault_meta_info=dict(
- key_vault_resource_id=datalake_store_obj.encryption_config.key_vault_meta_info.key_vault_resource_id,
- encryption_key_name=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_name,
- encryption_key_version=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_version
- )
- )
-
- return account_dict
-
- def compare_lists(self, list1, list2):
- if len(list1) != len(list2):
- return False
- for element in list1:
- if element not in list2:
- return False
- return True
-
-
-def main():
- AzureRMDatalakeStore()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py
deleted file mode 100644
index 8444a4c1c..000000000
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py
+++ /dev/null
@@ -1,468 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_datalakestore_info
-version_added: "1.4.0"
-short_description: Get Azure Data Lake Store info
-description:
- - Get Azure Data Lake Store info.
-
-options:
- resource_group:
- description:
- - The name of the Azure resource group.
- type: str
- aliases:
- - resource_group_name
- name:
- description:
- - The name of the Data Lake Store account.
- type: str
-
-extends_documentation_fragment:
- - azure.azcollection.azure
-
-author:
- - David Duque Hernández (@next-davidduquehernandez)
-
-'''
-
-EXAMPLES = '''
-- name: Get Azure Data Lake Store info from resource group 'myResourceGroup' and name 'myDataLakeStore'
- azure_rm_datalakestore_info:
- resource_group: myResourceGroup
- name: myDataLakeStore
-
-- name: Get Azure Data Lake Store info from resource group 'myResourceGroup'
- azure_rm_datalakestore_info:
- resource_group: myResourceGroup
-
-- name: Get Azure Data Lake Store info
- azure_rm_datalakestore_info:
-'''
-
-RETURN = '''
-datalake:
- description:
- - A list of dictionaries containing facts for Azure Data Lake Store.
- returned: always
- type: complex
- contains:
- account_id:
- description:
- - The unique identifier associated with this Data Lake Store account.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- creation_time:
- description:
- - The account creation time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- current_tier:
- description:
- - The commitment tier in use for the current month.
- type: str
- sample: Consumption
- default_group:
- description:
- - The default owner group for all new folders and files created in the Data Lake Store account.
- type: str
- sample: null
- encryption_config:
- description:
- - The Key Vault encryption configuration.
- type: complex
- contains:
- type:
- description:
- - The type of encryption configuration being used.
- type: str
- returned: always
- sample: ServiceManaged
- key_vault_meta_info:
- description:
- - The Key Vault information for connecting to user managed encryption keys.
- type: complex
- contains:
- key_vault_resource_id:
- description:
- - The resource identifier for the user managed Key Vault being used to encrypt.
- type: str
- returned: always
- sample: /subscriptions/{subscriptionId}/resourceGroups/myRG/providers/Microsoft.KeyVault/vaults/testkv
- encryption_key_name:
- description:
- - The name of the user managed encryption key.
- type: str
- returned: always
- sample: KeyName
- encryption_key_version:
- description:
- - The version of the user managed encryption key.
- type: str
- returned: always
- sample: 86a1e3b7406f45afa0d54e21eff47e39
- encryption_provisioning_state:
- description:
- - The current state of encryption provisioning for this Data Lake Store account.
- type: str
- sample: Succeeded
- encryption_state:
- description:
- - The current state of encryption for this Data Lake Store account.
- type: str
- sample: Enabled
- endpoint:
- description:
- - The full CName endpoint for this account.
- returned: always
- type: str
- sample: testaccount.azuredatalakestore.net
- firewall_allow_azure_ips:
- description:
- - The current state of allowing or disallowing IPs originating within Azure through the firewall.
- type: str
- sample: Disabled
- firewall_rules:
- description:
- - The list of firewall rules associated with this Data Lake Store account.
- type: list
- contains:
- name:
- description:
- - The resource name.
- type: str
- returned: always
- sample: Example Name
- start_ip_address:
- description:
- - The start IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.1
- end_ip_address:
- description:
- - The end IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.254
- firewall_state:
- description:
- - The current state of the IP address firewall for this Data Lake Store account.
- type: str
- sample: Enabled
- id:
- description:
- - The resource identifier.
- returned: always
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.DataLakeStore/accounts/testaccount
- identity:
- description:
- - The Key Vault encryption identity, if any.
- type: complex
- contains:
- type:
- description:
- - The type of encryption being used.
- type: str
- sample: SystemAssigned
- principal_id:
- description:
- - The principal identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- tenant_id:
- description:
- - The tenant identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- last_modified_time:
- description:
- - The account last modified time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: westeurope
- name:
- description:
- - The resource name.
- returned: always
- type: str
- sample: testaccount
- new_tier:
- description:
- - The commitment tier to use for next month.
- type: str
- sample: Consumption
- provisioning_state:
- description:
- - The provisioning status of the Data Lake Store account.
- returned: always
- type: str
- sample: Succeeded
- state:
- description:
- - The state of the Data Lake Store account.
- returned: always
- type: str
- sample: Active
- tags:
- description:
- - The resource tags.
- returned: always
- type: dict
- sample: { "tag1":"abc" }
- trusted_id_providers:
- description:
- - The current state of the trusted identity provider feature for this Data Lake Store account.
- type: list
- contains:
- id:
- description:
- - The resource identifier.
- type: str
- name:
- description:
- - The resource name.
- type: str
- type:
- description:
- - The resource type.
- type: str
- id_provider:
- description:
- - The URL of this trusted identity provider.
- type: str
- trusted_id_provider_state:
- description:
- - The list of trusted identity providers associated with this Data Lake Store account.
- type: str
- sample: Enabled
- type:
- description:
- - The resource type.
- returned: always
- type: str
- sample: Microsoft.DataLakeStore/accounts
- virtual_network_rules:
- description:
- - The list of virtual network rules associated with this Data Lake Store account.
- type: list
- contains:
- name:
- description:
- - The resource name.
- type: str
- sample: Rule Name
- subnet_id:
- description:
- - The resource identifier for the subnet.
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default
-'''
-
-try:
- from azure.core.exceptions import ResourceNotFoundError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-class AzureRMDatalakeStoreInfo(AzureRMModuleBase):
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str', aliases=['resource_group_name'])
- )
-
- self.results = dict(
- changed=False,
- datalake=[]
- )
-
- self.name = None
- self.resource_group = None
-
- super(AzureRMDatalakeStoreInfo, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- self.module.deprecate("The azure_rm_datalakestore_info.py will deprecated. Azure Data Lake Storage Gen1 retired on February 29,2024", version=(2.3, ))
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- results = []
- if self.name:
- results = self.get_datalake_store()
- elif self.resource_group:
- results = self.list_resource_group()
- else:
- results = self.list_all()
-
- self.results['datalake'] = results
- return self.results
-
- def get_datalake_store(self):
- self.log('Get properties for datalake store {0}'.format(self.name))
- datalake_store_obj = None
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.get(self.resource_group, self.name)
- except ResourceNotFoundError:
- pass
-
- if datalake_store_obj:
- return [self.account_obj_to_dict(datalake_store_obj)]
-
- return list()
-
- def list_resource_group(self):
- self.log('Get basic properties for datalake store in resource group {0}'.format(self.resource_group))
- datalake_store_obj = None
- results = list()
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.list_by_resource_group(self.resource_group)
- except Exception:
- pass
-
- if datalake_store_obj:
- for datalake_item in datalake_store_obj:
- results.append(self.account_obj_to_dict_basic(datalake_item))
- return results
-
- return list()
-
- def list_all(self):
- self.log('Get basic properties for all datalake store')
- datalake_store_obj = None
- results = list()
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.list()
- except Exception:
- pass
-
- if datalake_store_obj:
- for datalake_item in datalake_store_obj:
- results.append(self.account_obj_to_dict_basic(datalake_item))
- return results
-
- return list()
-
- def account_obj_to_dict(self, datalake_store_obj):
- account_dict = dict(
- account_id=datalake_store_obj.account_id,
- creation_time=datalake_store_obj.creation_time,
- current_tier=datalake_store_obj.current_tier,
- default_group=datalake_store_obj.default_group,
- encryption_config=dict(type=datalake_store_obj.encryption_config.type,
- key_vault_meta_info=None),
- encryption_provisioning_state=datalake_store_obj.encryption_provisioning_state,
- encryption_state=datalake_store_obj.encryption_state,
- endpoint=datalake_store_obj.endpoint,
- firewall_allow_azure_ips=datalake_store_obj.firewall_allow_azure_ips,
- firewall_rules=None,
- firewall_state=datalake_store_obj.firewall_state,
- id=datalake_store_obj.id,
- identity=None,
- last_modified_time=datalake_store_obj.last_modified_time,
- location=datalake_store_obj.location,
- name=datalake_store_obj.name,
- new_tier=datalake_store_obj.new_tier,
- provisioning_state=datalake_store_obj.provisioning_state,
- state=datalake_store_obj.state,
- tags=datalake_store_obj.tags,
- trusted_id_providers=datalake_store_obj.trusted_id_providers,
- trusted_id_provider_state=datalake_store_obj.trusted_id_provider_state,
- type=datalake_store_obj.type,
- virtual_network_rules=None
- )
-
- account_dict['firewall_rules'] = list()
- for rule in datalake_store_obj.firewall_rules:
- rule_item = dict(
- name=rule.name,
- start_ip_address=rule.start_ip_address,
- end_ip_address=rule.end_ip_address
- )
- account_dict['firewall_rules'].append(rule_item)
-
- account_dict['virtual_network_rules'] = list()
- for vnet_rule in datalake_store_obj.virtual_network_rules:
- vnet_rule_item = dict(
- name=vnet_rule.name,
- subnet_id=vnet_rule.subnet_id
- )
- account_dict['virtual_network_rules'].append(vnet_rule_item)
-
- if datalake_store_obj.identity:
- account_dict['identity'] = dict(
- type=datalake_store_obj.identity.type,
- principal_id=datalake_store_obj.identity.principal_id,
- tenant_id=datalake_store_obj.identity.tenant_id
- )
-
- if datalake_store_obj.encryption_config.key_vault_meta_info:
- account_dict['encryption_config'] = dict(
- key_vault_meta_info=dict(
- key_vault_resource_id=datalake_store_obj.encryption_config.key_vault_meta_info.key_vault_resource_id,
- encryption_key_name=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_name,
- encryption_key_version=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_version
- )
- )
-
- return account_dict
-
- def account_obj_to_dict_basic(self, datalake_store_obj):
- account_dict = dict(
- account_id=datalake_store_obj.account_id,
- creation_time=datalake_store_obj.creation_time,
- endpoint=datalake_store_obj.endpoint,
- id=datalake_store_obj.id,
- last_modified_time=datalake_store_obj.last_modified_time,
- location=datalake_store_obj.location,
- name=datalake_store_obj.name,
- provisioning_state=datalake_store_obj.provisioning_state,
- state=datalake_store_obj.state,
- tags=datalake_store_obj.tags,
- type=datalake_store_obj.type
- )
-
- return account_dict
-
-
-def main():
- AzureRMDatalakeStoreInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py
index 2c473778c..c6c56291a 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py
@@ -328,99 +328,6 @@ options:
- Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination.
- This element is only used when I(protocol=Tcp).
type: bool
- public_ip_address_name:
- description:
- - (deprecated) Name of an existing public IP address object to associate with the security group.
- - This option has been deprecated, and will be removed in 2.9. Use I(frontend_ip_configurations) instead.
- type: str
- aliases:
- - public_ip_address
- - public_ip_name
- - public_ip
- probe_port:
- description:
- - (deprecated) The port that the health probe will use.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: int
- probe_protocol:
- description:
- - (deprecated) The protocol to use for the health probe.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: str
- choices:
- - Tcp
- - Http
- - Https
- probe_interval:
- description:
- - (deprecated) Time (in seconds) between endpoint health probes.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: int
- default: 15
- probe_fail_count:
- description:
- - (deprecated) The amount of probe failures for the load balancer to make a health determination.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- default: 3
- type: int
- probe_request_path:
- description:
- - (deprecated) The URL that an HTTP probe or HTTPS probe will use (only relevant if I(probe_protocol=Http) or I(probe_protocol=Https)).
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: str
- protocol:
- description:
- - (deprecated) The protocol (TCP or UDP) that the load balancer will use.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: str
- choices:
- - Tcp
- - Udp
- load_distribution:
- description:
- - (deprecated) The type of load distribution that the load balancer will employ.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: str
- choices:
- - Default
- - SourceIP
- - SourceIPProtocol
- frontend_port:
- description:
- - (deprecated) Frontend port that will be exposed for the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: int
- backend_port:
- description:
- - (deprecated) Backend port that will be exposed for the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: int
- idle_timeout:
- description:
- - (deprecated) Timeout for TCP idle connection in minutes.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: int
- default: 4
- natpool_frontend_port_start:
- description:
- - (deprecated) Start of the port range for a NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: int
- natpool_frontend_port_end:
- description:
- - (deprecated) End of the port range for a NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: int
- natpool_backend_port:
- description:
- - (deprecated) Backend port used by the NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: int
- natpool_protocol:
- description:
- - (deprecated) The protocol for the NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: str
extends_documentation_fragment:
- azure.azcollection.azure
- azure.azcollection.azure_tags
@@ -710,58 +617,6 @@ class AzureRMLoadBalancer(AzureRMModuleBase):
type='list',
elements='dict',
options=load_balancing_rule_spec
- ),
- public_ip_address_name=dict(
- type='str',
- aliases=['public_ip_address', 'public_ip_name', 'public_ip']
- ),
- probe_port=dict(
- type='int'
- ),
- probe_protocol=dict(
- type='str',
- choices=['Tcp', 'Http', 'Https']
- ),
- probe_interval=dict(
- type='int',
- default=15
- ),
- probe_fail_count=dict(
- type='int',
- default=3
- ),
- probe_request_path=dict(
- type='str'
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Udp']
- ),
- load_distribution=dict(
- type='str',
- choices=['Default', 'SourceIP', 'SourceIPProtocol']
- ),
- frontend_port=dict(
- type='int'
- ),
- backend_port=dict(
- type='int'
- ),
- idle_timeout=dict(
- type='int',
- default=4
- ),
- natpool_frontend_port_start=dict(
- type='int'
- ),
- natpool_frontend_port_end=dict(
- type='int'
- ),
- natpool_backend_port=dict(
- type='int'
- ),
- natpool_protocol=dict(
- type='str'
)
)
@@ -775,22 +630,7 @@ class AzureRMLoadBalancer(AzureRMModuleBase):
self.inbound_nat_rules = None
self.inbound_nat_pools = None
self.load_balancing_rules = None
- self.public_ip_address_name = None
self.state = None
- self.probe_port = None
- self.probe_protocol = None
- self.probe_interval = None
- self.probe_fail_count = None
- self.probe_request_path = None
- self.protocol = None
- self.load_distribution = None
- self.frontend_port = None
- self.backend_port = None
- self.idle_timeout = None
- self.natpool_frontend_port_start = None
- self.natpool_frontend_port_end = None
- self.natpool_backend_port = None
- self.natpool_protocol = None
self.tags = None
self.results = dict(changed=False, state=dict())
@@ -814,54 +654,6 @@ class AzureRMLoadBalancer(AzureRMModuleBase):
load_balancer = self.get_load_balancer()
if self.state == 'present':
- # compatible parameters
- is_compatible_param = not self.frontend_ip_configurations and not self.backend_address_pools and not self.probes and not self.inbound_nat_pools
- is_compatible_param = is_compatible_param and not load_balancer # the instance should not be exist
- is_compatible_param = is_compatible_param or self.public_ip_address_name or self.probe_protocol or self.natpool_protocol or self.protocol
- if is_compatible_param:
- self.deprecate('Discrete load balancer config settings are deprecated and will be removed.'
- ' Use frontend_ip_configurations, backend_address_pools, probes, inbound_nat_pools lists instead.', version=(2, 9))
- frontend_ip_name = 'frontendip0'
- backend_address_pool_name = 'backendaddrp0'
- prob_name = 'prob0'
- inbound_nat_pool_name = 'inboundnatp0'
- lb_rule_name = 'lbr'
- self.frontend_ip_configurations = [dict(
- name=frontend_ip_name,
- public_ip_address=self.public_ip_address_name
- )]
- self.backend_address_pools = [dict(
- name=backend_address_pool_name
- )]
- self.probes = [dict(
- name=prob_name,
- port=self.probe_port,
- protocol=self.probe_protocol,
- interval=self.probe_interval,
- fail_count=self.probe_fail_count,
- request_path=self.probe_request_path
- )] if self.probe_protocol else None
- self.inbound_nat_pools = [dict(
- name=inbound_nat_pool_name,
- frontend_ip_configuration_name=frontend_ip_name,
- protocol=self.natpool_protocol,
- frontend_port_range_start=self.natpool_frontend_port_start,
- frontend_port_range_end=self.natpool_frontend_port_end,
- backend_port=self.natpool_backend_port
- )] if self.natpool_protocol else None
- self.load_balancing_rules = [dict(
- name=lb_rule_name,
- frontend_ip_configuration=frontend_ip_name,
- backend_address_pool=backend_address_pool_name,
- probe=prob_name,
- protocol=self.protocol,
- load_distribution=self.load_distribution,
- frontend_port=self.frontend_port,
- backend_port=self.backend_port,
- idle_timeout=self.idle_timeout,
- enable_floating_ip=False,
- )] if self.protocol else None
-
# create new load balancer structure early, so it can be easily compared
if not load_balancer:
frontend_ip_configurations_param = [self.network_models.FrontendIPConfiguration(
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py
new file mode 100644
index 000000000..6f88fff94
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_localnetworkgateway
+
+version_added: "2.4.0"
+
+short_description: Manage Azure Local Network Gateway in a resource group
+
+description:
+ - Create, update or delete Azure Local Network Gateway in a resource group
+
+options:
+ resource_group:
+ description:
+ - The local network gateway's resource group.
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the local network gateway.
+ type: str
+ required: true
+ location:
+ description:
+ - The location of the local network gateway.
+ type: str
+ local_network_address_space:
+ description:
+ - Local network site address space.
+ type: dict
+ suboptions:
+ address_prefixes:
+ description:
+ - A list of address blocks reserved for this virtual network in CIDR notation.
+ type: list
+ elements: str
+ gateway_ip_address:
+ description:
+ - IP address of local network gateway.
+ type: str
+ fqdn:
+ description:
+ - FQDN of local network gateway.
+ type: str
+ bgp_settings:
+ description:
+ - Local network gateway's BGP speaker settings.
+ type: dict
+ suboptions:
+ asn:
+ description:
+ - The BGP speaker's ASN.
+ type: int
+ bgp_peering_address:
+ description:
+ - The BGP peering address and BGP identifier of this BGP speaker.
+ type: str
+ peer_weight:
+ description:
+ - The weight added to routes learned from this BGP speaker.
+ type: int
+ state:
+ description:
+ - Use C(present) to create or update a local network gateway.
+ - Use C(absent) to delete the local network gateway.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create a new local network gateway
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "localgateway-name"
+ local_network_address_space:
+ address_prefixes:
+ - 10.0.0.0/24
+ - 20.0.0.0/24
+ fqdn: fredtest.com
+ tags:
+ key: value
+ bgp_settings:
+ asn: 8
+ bgp_peering_address: 10.3.0.1
+ peer_weight: 3
+
+- name: Delete local network gateway
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "localgateway-name"
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Local Network Gateway resource.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx-xxxx/resourceGroups/testRG/providers/Microsoft.Network/localNetworkGateways/testgateway"
+ bgp_settings:
+ description:
+ - Local network gateway's BGP speaker settings.
+ type: complex
+ contains:
+ asn:
+ description:
+ - The BGP speaker's ASN.
+ type: int
+ returned: always
+ sample: 10
+ bgp_peering_address:
+ description:
+ - The BGP peering address and BGP identifier of this BGP speaker.
+ type: str
+ returned: always
+ sample: 10.0.0.3
+ peer_weight:
+ description:
+ - The weight added to routes learned from this BGP speaker.
+ type: int
+ returned: always
+ sample: 0
+ fqdn:
+ description:
+ - FQDN of local network gateway.
+ type: str
+ returned: always
+ sample: testfqdn.com
+ gateway_ip_address:
+ description:
+ - IP address of local network gateway.
+ type: str
+ returned: always
+ sample: 10.1.1.1
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ local_network_address_space:
+ description:
+ - Local network site address space.
+ type: complex
+ contains:
+ address_prefixes:
+ description:
+ - A list of address blocks reserved for this virtual network in CIDR notation.
+ type: list
+ returned: always
+ sample: ["10.0.0.0/24", "20.0.0.0/24"]
+ location:
+ description:
+ - The resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: testgateway
+ provisioning_state:
+ description:
+ - The provisioning state of the local network gateway resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ tags:
+ description:
+ - The resource tags.
+            type: dict
+ returned: always
+ sample: {'key1': 'value1', 'key2': 'value2'}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/localNetworkGateways
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+ from azure.core.polling import LROPoller
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+bgp_settings_spec = dict(
+ asn=dict(type='int'),
+ bgp_peering_address=dict(type='str'),
+ peer_weight=dict(type='int'),
+)
+
+
+local_network_address_space_spec = dict(
+ address_prefixes=dict(type='list', elements='str')
+)
+
+
+class AzureRMNetworkGateWay(AzureRMModuleBase):
+    """Utility class to create, update or delete an Azure Local Network Gateway in a resource group"""
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str', required=True),
+ resource_group=dict(type='str', required=True),
+ location=dict(type='str'),
+ local_network_address_space=dict(type='dict', options=local_network_address_space_spec),
+ gateway_ip_address=dict(type='str'),
+ fqdn=dict(type='str'),
+ bgp_settings=dict(type='dict', options=bgp_settings_spec),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ self.name = None
+ self.location = None
+ self.local_network_address_space = None
+ self.gateway_ip_address = None
+ self.fqdn = None
+ self.tags = None
+ self.bgp_settings = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+ mutually_exclusive = [['gateway_ip_address', 'fqdn']]
+
+ super(AzureRMNetworkGateWay, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ supports_tags=True,
+ facts_module=False)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec) + ['tags']:
+ setattr(self, key, kwargs[key])
+
+ if not self.location:
+ # Set default location
+ resource_group = self.get_resource_group(self.resource_group)
+ self.location = resource_group.location
+
+ old_response = self.get_local_network_gateway()
+ changed = False
+ update_tags = False
+
+ response = None
+ if self.state == 'present':
+ if old_response is not None:
+ if self.fqdn is not None and self.fqdn != old_response['fqdn']:
+ changed = True
+ else:
+ self.fqdn = old_response['fqdn']
+ if self.gateway_ip_address is not None and self.gateway_ip_address != old_response['gateway_ip_address']:
+ changed = True
+ else:
+ self.gateway_ip_address = old_response['gateway_ip_address']
+ if self.bgp_settings is not None and\
+ not all(self.bgp_settings.get(key) == old_response['bgp_settings'].get(key) for key in self.bgp_settings.keys()):
+ changed = True
+ if self.local_network_address_space is not None:
+ if old_response['local_network_address_space'].get('address_prefixes') is not None:
+ new_address = list(set(self.local_network_address_space['address_prefixes'] +
+ old_response['local_network_address_space']['address_prefixes']))
+ if len(new_address) > len(old_response['local_network_address_space'].get('address_prefixes')):
+ changed = True
+ self.local_network_address_space['address_prefixes'] = new_address
+ else:
+ changed = True
+ else:
+ self.local_network_address_space['address_prefixes'] = old_response['local_network_address_space'].get('address_prefixes')
+
+ update_tags, new_tags = self.update_tags(old_response.get('tags'))
+ if update_tags:
+ # response = self.update_local_network_gateway_tags(new_tags)
+ self.fail("Can't update the local network gateway tags, Exception code as AllPropertiesAreReadOnly")
+ changed = True
+ else:
+ changed = True
+
+ local_network_address_space = None
+ if self.local_network_address_space is not None:
+ local_network_address_space = self.network_models.AddressSpace(address_prefixes=self.local_network_address_space['address_prefixes'])
+ bgp_settings = None
+ if self.bgp_settings is not None:
+ bgp_settings = self.network_models.BgpSettings(asn=self.bgp_settings.get('asn'),
+ bgp_peering_address=self.bgp_settings.get('bgp_peering_address'),
+ peer_weight=self.bgp_settings.get('peer_weight'))
+
+ gateway_resource = self.network_models.LocalNetworkGateway(location=self.location,
+ tags=self.tags,
+ gateway_ip_address=self.gateway_ip_address,
+ fqdn=self.fqdn,
+ local_network_address_space=local_network_address_space,
+ bgp_settings=bgp_settings)
+ if changed:
+ if not self.check_mode:
+ response = self.create_or_update_local_network_gateway(gateway_resource)
+
+ if old_response is not None:
+ update_tags, new_tags = self.update_tags(old_response.get('tags'))
+ if update_tags:
+ if not self.check_mode:
+ response = self.update_local_network_gateway_tags(new_tags)
+ changed = True
+ else:
+ if not self.check_mode:
+ if old_response is not None:
+ self.delete_local_network_gateway()
+ changed = True
+ response = None
+ else:
+ changed = True
+
+ if response is None:
+ response = old_response
+ self.results['state'] = response
+ self.results['changed'] = changed
+ return self.results
+
+ def get_local_network_gateway(self):
+ """Gets the specified local network gateway in a resource group"""
+ response = None
+ try:
+ response = self.network_client.local_network_gateways.get(self.resource_group, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the specified local network gateway in a resource group Failed, Exception as {0}".format(ec))
+ return None
+ return self.format_response(response)
+
+ def create_or_update_local_network_gateway(self, body):
+ """Create or Update local network gateway"""
+ response = None
+ try:
+ response = self.network_client.local_network_gateways.begin_create_or_update(self.resource_group, self.name, body)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+ except HttpResponseError as ec:
+ self.fail("Create or Updated a local network gateway in a resource group Failed, Exception as {0}".format(ec))
+
+ return self.format_response(response)
+
+ def update_local_network_gateway_tags(self, tags):
+ """Updates a local network gateway tags"""
+ response = None
+ try:
+ response = self.network_client.local_network_gateways.update_tags(self.resource_group, self.name, tags)
+ except HttpResponseError as ec:
+ self.fail("Update a local network gateway tags Failed, Exception as {0}".format(ec))
+ return self.format_response(response)
+
+ def delete_local_network_gateway(self):
+ """Deletes the specified local network gateway"""
+ try:
+ self.network_client.local_network_gateways.begin_delete(self.resource_group, self.name)
+ except HttpResponseError as ec:
+ self.fail("Deletes the specified local network gateway Failed, Exception as {0}".format(ec))
+ return None
+
+ def format_response(self, item):
+ result = dict(
+ id=item.id,
+ name=item.name,
+ location=item.location,
+ type=item.type,
+ tags=item.tags,
+ etag=item.etag,
+ local_network_address_space=dict(),
+ gateway_ip_address=item.gateway_ip_address,
+ fqdn=item.fqdn,
+ provisioning_state=item.provisioning_state,
+ bgp_settings=dict(),
+ )
+
+ if item.local_network_address_space is not None:
+ result['local_network_address_space']['address_prefixes'] = item.local_network_address_space.address_prefixes
+ if item.bgp_settings is not None:
+ result['bgp_settings']['asn'] = item.bgp_settings.asn
+ result['bgp_settings']['bgp_peering_address'] = item.bgp_settings.bgp_peering_address
+ result['bgp_settings']['peer_weight'] = item.bgp_settings.peer_weight
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMNetworkGateWay()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py
new file mode 100644
index 000000000..445c2a237
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_localnetworkgateway_info
+
+version_added: "2.4.0"
+
+short_description: Gets or lists the specified local network gateway in a resource group
+
+description:
+    - Gets or lists the specified local network gateway in a resource group.
+
+options:
+ resource_group:
+ description:
+ - The local network gateway's resource group.
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the local network gateway.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Gets the specified local network gateway in a resource group
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ local_networkgateway_name }}"
+
+- name: Gets all the local network gateways in a resource group
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+
+- name: Gets all the local network gateways in a resource group and filter by tags
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+ tags:
+ - foo
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Local Network Gateway resource.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx-xxxx/resourceGroups/testRG/providers/Microsoft.Network/localNetworkGateways/testgateway"
+ bgp_settings:
+ description:
+ - Local network gateway's BGP speaker settings.
+ type: complex
+ contains:
+ asn:
+ description:
+ - The BGP speaker's ASN.
+ type: int
+ returned: always
+ sample: 10
+ bgp_peering_address:
+ description:
+ - The BGP peering address and BGP identifier of this BGP speaker.
+ type: str
+ returned: always
+ sample: 10.0.0.3
+ peer_weight:
+ description:
+ - The weight added to routes learned from this BGP speaker.
+ type: int
+ returned: always
+ sample: 0
+ fqdn:
+ description:
+ - FQDN of local network gateway.
+ type: str
+ returned: always
+ sample: testfqdn.com
+ gateway_ip_address:
+ description:
+ - IP address of local network gateway.
+ type: str
+ returned: always
+ sample: 10.1.1.1
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ local_network_address_space:
+ description:
+ - Local network site address space.
+ type: complex
+ contains:
+ address_prefixes:
+ description:
+ - A list of address blocks reserved for this virtual network in CIDR notation.
+ type: list
+ returned: always
+ sample: ["10.0.0.0/24", "20.0.0.0/24"]
+ location:
+ description:
+ - The resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: testgateway
+ provisioning_state:
+ description:
+ - The provisioning state of the local network gateway resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ tags:
+ description:
+ - The resource tags.
+            type: dict
+ returned: always
+ sample: {'key1': 'value1', 'key2': 'value2'}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/localNetworkGateways
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+class AzureRMNetworkGateWayInfo(AzureRMModuleBase):
+    """Utility class to get or list Azure Local Network Gateway facts in a resource group"""
+
+ def __init__(self):
+
+ self.module_args = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str', required=True),
+ tags=dict(type='list', elements='str'),
+ )
+
+ self.name = None
+ self.tags = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+
+ super(AzureRMNetworkGateWayInfo, self).__init__(derived_arg_spec=self.module_args,
+ supports_check_mode=True,
+ supports_tags=False,
+ facts_module=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in self.module_args:
+ setattr(self, key, kwargs[key])
+
+ if self.name is not None:
+ self.results['state'] = self.get_local_network_gateway()
+ else:
+ self.results['state'] = self.list_local_network_gateway()
+
+ return self.results
+
+ def get_local_network_gateway(self):
+ """Gets the specified local network gateway in a resource group"""
+ response = None
+
+ try:
+ response = self.network_client.local_network_gateways.get(self.resource_group, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the specified local network gateway in a resource group Failed, Exception as {0}".format(ec))
+ if response and self.has_tags(response.tags, self.tags):
+ return [self.format_response(response)]
+ else:
+ return []
+
+ def list_local_network_gateway(self):
+ """Gets all the local network gateways in a resource group"""
+ response = None
+
+ try:
+ response = self.network_client.local_network_gateways.list(self.resource_group)
+ except HttpResponseError as ec:
+ self.log("Gets all the local network gateways in a resource group Failed, Exception as {0}".format(ec))
+
+ if response:
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(self.format_response(item))
+ return results
+ else:
+ return []
+
+ def format_response(self, item):
+ result = dict(
+ id=item.id,
+ name=item.name,
+ location=item.location,
+ type=item.type,
+ tags=item.tags,
+ etag=item.etag,
+ local_network_address_space=dict(),
+ gateway_ip_address=item.gateway_ip_address,
+ fqdn=item.fqdn,
+ provisioning_state=item.provisioning_state,
+ bgp_settings=dict(),
+ )
+
+ if item.local_network_address_space is not None:
+ result['local_network_address_space']['address_prefixes'] = item.local_network_address_space.address_prefixes
+ if item.bgp_settings is not None:
+ result['bgp_settings']['asn'] = item.bgp_settings.asn
+ result['bgp_settings']['bgp_peering_address'] = item.bgp_settings.bgp_peering_address
+ result['bgp_settings']['peer_weight'] = item.bgp_settings.peer_weight
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMNetworkGateWayInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py
index 3343d5ac1..beba7810b 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py
@@ -79,47 +79,6 @@ options:
- Windows
- Linux
default: Linux
- private_ip_address:
- description:
- - (Deprecate) Valid IPv4 address that falls within the specified subnet.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: str
- private_ip_allocation_method:
- description:
- - (Deprecate) Whether or not the assigned IP address is permanent.
- - When creating a network interface, if you specify I(private_ip_address=Static), you must provide a value for I(private_ip_address).
- - You can update the allocation method to C(Static) after a dynamic private IP address has been assigned.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- default: Dynamic
- type: str
- choices:
- - Dynamic
- - Static
- public_ip:
- description:
- - (Deprecate) When creating a network interface, if no public IP address name is provided a default public IP address will be created.
- - Set to C(false) if you do not want a public IP address automatically created.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: bool
- default: 'yes'
- public_ip_address_name:
- description:
- - (Deprecate) Name of an existing public IP address object to associate with the security group.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: str
- aliases:
- - public_ip_address
- - public_ip_name
- public_ip_allocation_method:
- description:
- - (Deprecate) If a I(public_ip_address_name) is not provided, a default public IP address will be created.
- - The allocation method determines whether or not the public IP address assigned to the network interface is permanent.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: str
- choices:
- - Dynamic
- - Static
- default: Dynamic
ip_configurations:
description:
- List of IP configurations. Each configuration object should include
@@ -323,7 +282,6 @@ EXAMPLES = '''
virtual_network: vnet001
subnet_name: subnet001
create_with_security_group: false
- public_ip: false
ip_configurations:
- name: default
primary: true
@@ -614,13 +572,8 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
create_with_security_group=dict(type='bool', default=True),
security_group=dict(type='raw', aliases=['security_group_name']),
state=dict(default='present', choices=['present', 'absent']),
- private_ip_address=dict(type='str'),
- private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']),
- public_ip=dict(type='bool', default=True),
subnet_name=dict(type='str', aliases=['subnet']),
virtual_network=dict(type='raw', aliases=['virtual_network_name']),
- public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
ip_configurations=dict(type='list', default=[], elements='dict', options=ip_configuration_spec),
os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'),
open_ports=dict(type='list', elements='str'),
@@ -638,13 +591,8 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
self.create_with_security_group = None
self.enable_accelerated_networking = None
self.security_group = None
- self.private_ip_address = None
- self.private_ip_allocation_method = None
- self.public_ip_address_name = None
- self.public_ip = None
self.subnet_name = None
self.virtual_network = None
- self.public_ip_allocation_method = None
self.state = None
self.tags = None
self.os_type = None
@@ -703,17 +651,13 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
if len(asgs) > 0:
config['application_security_groups'] = asgs
+        # If ip_configurations is not specified then provide the default
+ # private interface
if self.state == 'present' and not self.ip_configurations:
- # construct the ip_configurations array for compatible
- self.deprecate('Setting ip_configuration flatten is deprecated and will be removed.'
- ' Using ip_configurations list to define the ip configuration', version=(2, 9))
self.ip_configurations = [
dict(
- private_ip_address=self.private_ip_address,
- private_ip_allocation_method=self.private_ip_allocation_method,
- public_ip_address_name=self.public_ip_address_name if self.public_ip else None,
- public_ip_allocation_method=self.public_ip_allocation_method,
name='default',
+ private_ip_allocation_method='Dynamic',
primary=True
)
]
@@ -875,7 +819,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
def get_or_create_public_ip_address(self, ip_config):
name = ip_config.get('public_ip_address_name')
- if not (self.public_ip and name):
+ if not name:
return None
pip = self.get_public_ip_address(name)
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py
index c73843c46..1fba876f9 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py
@@ -113,8 +113,6 @@ try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
- import logging
- logging.basicConfig(filename='log.log', level=logging.INFO)
except ImportError:
# This is handled in azure_rm_common
pass
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py
index 335dc53c8..9d443deec 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py
@@ -201,6 +201,42 @@ options:
- Whether to start the Post gresql server.
type: bool
default: False
+ identity:
+ description:
+ - Identity for the Server.
+ type: dict
+ version_added: '2.4.0'
+ suboptions:
+ type:
+ description:
+ - Type of the managed identity
+ required: false
+ choices:
+ - UserAssigned
+ - None
+ default: None
+ type: str
+ user_assigned_identities:
+ description:
+ - User Assigned Managed Identities and its options
+ required: false
+ type: dict
+ default: {}
+ suboptions:
+ id:
+ description:
+ - List of the user assigned identities IDs associated to the VM
+ required: false
+ type: list
+ elements: str
+ default: []
+ append:
+ description:
+ - If the list of identities has to be appended to current identities (true) or if it has to replace current identities (false)
+ required: false
+ type: bool
+ default: True
+
extends_documentation_fragment:
- azure.azcollection.azure
@@ -489,6 +525,7 @@ servers:
try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+ import azure.mgmt.rdbms.postgresql_flexibleservers.models as PostgreSQLFlexibleModels
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
except ImportError:
@@ -534,6 +571,18 @@ storage_spec = dict(
)
+user_assigned_identities_spec = dict(
+ id=dict(type='list', default=[], elements='str'),
+ append=dict(type='bool', default=True)
+)
+
+
+managed_identity_spec = dict(
+ type=dict(type='str', choices=['UserAssigned', 'None'], default='None'),
+ user_assigned_identities=dict(type='dict', options=user_assigned_identities_spec, default={}),
+)
+
+
class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
"""Configuration class for an Azure RM PostgreSQL Flexible Server resource"""
@@ -613,6 +662,7 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
source_server_resource_id=dict(
type='str'
),
+ identity=dict(type='dict', options=managed_identity_spec),
state=dict(
type='str',
default='present',
@@ -628,6 +678,7 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
self.is_start = None
self.is_stop = None
self.is_restart = None
+ self.identity = None
self.results = dict(changed=False)
self.state = None
@@ -663,6 +714,10 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
self.log("PostgreSQL Flexible Server instance doesn't exist")
if self.state == 'present':
if not self.check_mode:
+ if self.identity:
+ update_identity, new_identity = self.update_identities({})
+ if update_identity:
+ self.parameters['identity'] = new_identity
response = self.create_postgresqlflexibleserver(self.parameters)
if self.is_stop:
self.stop_postgresqlflexibleserver()
@@ -712,6 +767,12 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
else:
self.update_parameters['maintenance_window'][key] = old_response['maintenance_window'].get(key)
+ if self.identity:
+ update_identity, new_identity = self.update_identities(old_response.get('identity', {}))
+ if update_identity:
+ self.update_parameters['identity'] = new_identity
+ update_flag = True
+
update_tags, new_tags = self.update_tags(old_response['tags'])
self.update_parameters['tags'] = new_tags
if update_tags:
@@ -915,9 +976,50 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
result['maintenance_window']['start_minute'] = item.maintenance_window.start_minute
result['maintenance_window']['start_hour'] = item.maintenance_window.start_hour
result['maintenance_window']['day_of_week'] = item.maintenance_window.day_of_week
+ if item.identity is not None:
+ result['identity'] = item.identity.as_dict()
+ else:
+ result['identity'] = PostgreSQLFlexibleModels.UserAssignedIdentity(type='None').as_dict()
return result
+ def update_identities(self, curr_identity):
+ new_identities = []
+ changed = False
+ current_managed_type = curr_identity.get('type', 'None')
+ current_managed_identities = set(curr_identity.get('user_assigned_identities', {}).keys())
+ param_identity = self.module.params.get('identity')
+ param_identities = set(param_identity.get('user_assigned_identities', {}).get('id', []))
+ new_identities = param_identities
+
+ # If type set to None, and Resource has None, nothing to do
+ if 'None' in param_identity.get('type') and current_managed_type == 'None':
+ pass
+ # If type set to None, and Resource has current identities, remove UserAssigned identities
+ elif param_identity.get('type') == 'None':
+ changed = True
+ # If type in module args contains 'UserAssigned'
+ elif 'UserAssigned' in param_identity.get('type'):
+ if param_identity.get('user_assigned_identities', {}).get('append', False) is True:
+ new_identities = param_identities.union(current_managed_identities)
+ if len(current_managed_identities) != len(new_identities):
+ # update identities
+ changed = True
+ # If new identities have to overwrite current identities
+ else:
+ # Check if module args identities are different as current ones
+ if current_managed_identities.difference(new_identities) != set():
+ changed = True
+
+ # Append identities to the model
+ user_assigned_identities_dict = {uami: dict() for uami in new_identities}
+ new_identity = PostgreSQLFlexibleModels.UserAssignedIdentity(
+ type=param_identity.get('type'),
+ user_assigned_identities=user_assigned_identities_dict
+ )
+
+ return changed, new_identity
+
def main():
"""Main execution"""
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py
index 50fe9adc5..8d5f2b636 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py
@@ -282,6 +282,30 @@ servers:
returned: always
sample: null
type: str
+ identity:
+ description:
+ - Identity for the Server.
+ type: complex
+ returned: when available
+ contains:
+ type:
+ description:
+ - Type of the managed identity
+ returned: always
+ sample: UserAssigned
+ type: str
+ user_assigned_identities:
+ description:
+ - User Assigned Managed Identities and its options
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Dict of the user assigned identities IDs associated to the Resource
+ returned: always
+ type: dict
+ elements: dict
tags:
description:
- Tags assigned to the resource. Dictionary of string:string pairs.
@@ -293,6 +317,7 @@ servers:
try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+ import azure.mgmt.rdbms.postgresql_flexibleservers.models as PostgreSQLFlexibleModels
from azure.core.exceptions import ResourceNotFoundError
except ImportError:
# This is handled in azure_rm_common
@@ -431,6 +456,10 @@ class AzureRMPostgreSqlFlexibleServersInfo(AzureRMModuleBase):
result['maintenance_window']['start_minute'] = item.maintenance_window.start_minute
result['maintenance_window']['start_hour'] = item.maintenance_window.start_hour
result['maintenance_window']['day_of_week'] = item.maintenance_window.day_of_week
+ if item.identity is not None:
+ result['identity'] = item.identity.as_dict()
+ else:
+ result['identity'] = PostgreSQLFlexibleModels.UserAssignedIdentity(type='None').as_dict()
return result
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py
index 782fb0417..d1019dd65 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py
@@ -198,13 +198,13 @@ rediscaches:
contains:
primary:
description:
- - The current primary key that clients can use to authenticate the Redis cahce.
+ - The current primary key that clients can use to authenticate the Redis cache.
returned: always
type: str
sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
secondary:
description:
- - The current secondary key that clients can use to authenticate the Redis cahce.
+ - The current secondary key that clients can use to authenticate the Redis cache.
returned: always
type: str
sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py
new file mode 100644
index 000000000..83d40fc5a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidatabase
+version_added: "2.4.0"
+short_description: Manage SQL Managed Instance databases
+description:
+ - Manage SQL Managed Instance databases.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ collation:
+ description:
+ - The collation of the Azure SQL Managed Database collation to use.
+ - For example C(SQL_Latin1_General_CP1_CI_AS) or C(Latin1_General_100_CS_AS_SC).
+ type: str
+ location:
+ description:
+ - The resource location.
+ type: str
+ state:
+ description:
+ - State of the SQL Managed Database.
+ - Use C(present) to create or update a SQL Managed Instance database and use C(absent) to delete.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create a SQL managed instance database
+ azure_rm_sqlmidatabase:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ collation: SQL_Latin1_General_CP1_CI_AS
+ location: eastus
+ tags:
+ key2: value2
+
+- name: Delete the SQL managed instance database
+ azure_rm_sqlmidatabase:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ state: absent
+'''
+
+RETURN = '''
+database:
+ description:
+ - A dictionary containing facts for SQL Managed Instance database info.
+ returned: always
+ type: complex
+ contains:
+ auto_complete_restore:
+ description:
+ - Whether to auto complete restore of this managed database.
+ type: bool
+ returned: always
+ sample: null
+ catalog_collation:
+ description:
+ - Collation of the metadata catalog.
+ type: str
+ returned: always
+ sample: null
+ create_mode:
+ description:
+ - Managed database create mode.
+ type: str
+ returned: always
+ sample: null
+ create_date:
+ description:
+ - Creation date of the database.
+ type: str
+ returned: always
+ sample: "2024-05-06T23:59:49.770Z"
+ database_name:
+ description:
+ - The SQL managed instance database name.
+ type: str
+ returned: always
+ sample: fredtest
+ default_secondary_location:
+ description:
+ - Geo paired region.
+ type: str
+ returned: always
+ sample: westus
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxxx/resourceGroups/testRG/providers/Microsoft.Sql/managedInstances/fredsqlmin/databases/fredtest"
+ last_backup_name:
+ description:
+ - Last backup file name for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ location:
+ description:
+ - The resource's location.
+ type: str
+ returned: always
+ sample: eastus
+ long_term_retention_backup_resource_id:
+ description:
+ - The name of the Long Term Retention backup to be used for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ returned: always
+ sample: fredsqlmin
+ recoverable_database_id:
+ description:
+ - The resource identifier of the recoverable database associated with the database.
+ type: str
+ returned: always
+ sample: null
+ resource_group:
+ description:
+ - The resource's resource group.
+ type: str
+ returned: always
+ sample: testRG
+ restorable_dropped_database_id:
+ description:
+ - The restorable dropped database resource id.
+ type: str
+ returned: always
+ sample: null
+ restore_point_in_time:
+ description:
+ - Specifies the point in time (ISO8601 format) of the source database.
+ type: str
+ returned: always
+ sample: null
+ source_database_id:
+ description:
+ - The resource identifier of the source database associated with create operation of this database.
+ type: str
+ returned: always
+ sample: null
+ status:
+ description:
+ - Status of the database.
+ type: str
+ returned: always
+ sample: online
+ storage_container_sas_token:
+ description:
+ - Specifies the storage container sas token.
+ type: str
+ returned: always
+ sample: null
+ storage_container_uri:
+ description:
+ - Specifies the uri of the storage container where backups for this restore are stored.
+ type: str
+ returned: always
+ sample: null
+ tags:
+ description:
+ - The resource's tags
+ type: str
+ returned: always
+ sample: {key1: value1}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances/databases"
+
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqlMIDatabase(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True,
+ ),
+ managed_instance_name=dict(
+ type='str',
+ required=True,
+ ),
+ database_name=dict(
+ type='str',
+ required=True,
+ ),
+ collation=dict(
+ type='str'
+ ),
+ location=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ default='present'
+ ),
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.managed_instance_name = None
+ self.database_name = None
+ self.state = None
+ self.parameters = dict()
+
+ super(AzureRMSqlMIDatabase, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True, facts_module=False)
+
+ def exec_module(self, **kwargs):
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs.get(key) is not None:
+ self.parameters[key] = kwargs.get(key)
+
+ changed = False
+ resource_group = self.get_resource_group(self.resource_group)
+ if self.parameters.get('location') is None:
+ # Set default location
+ self.parameters['location'] = resource_group.location
+
+ old_response = self.get()
+ if old_response is None:
+ if self.state == 'present':
+ changed = True
+ if not self.check_mode:
+ self.results['database'] = self.create_database()
+ else:
+ update_tags, tags = self.update_tags(old_response.get('tags'))
+ if update_tags:
+ changed = True
+ self.parameters['tags'] = tags
+ for key in self.parameters.keys():
+ if key != 'tags' and self.parameters[key] != old_response.get(key):
+ self.fail("The collation and location cannot be updated")
+ if self.state == 'present':
+ if changed and not self.check_mode:
+ self.results['database'] = self.update_database()
+ else:
+ self.results['database'] = old_response
+ else:
+ changed = True
+ if not self.check_mode:
+ self.results['database'] = self.delete_database()
+
+ self.results['changed'] = changed
+ return self.results
+
+ def create_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.begin_create_or_update(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ parameters=self.parameters)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Create the SQL managed instance database failed, exception as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def update_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.begin_update(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ parameters=self.parameters)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Update the SQL managed instance database failed, exception as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def get(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.get(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.log('Could not get facts for SQL managed instance database. Exception as {0}'.format(ec))
+
+ return self.format_item(response)
+
+ def delete_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.begin_delete(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Delete the SQL managed instance database failed, exception as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def format_item(self, item):
+ if item is None:
+ return
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'managed_instance_name': self.managed_instance_name,
+ 'database_name': d.get('name'),
+ 'id': d.get('id', None),
+ 'type': d.get('type', None),
+ 'location': d.get('location'),
+ 'tags': d.get('tags'),
+ 'collation': d.get('collation'),
+ 'status': d.get('status'),
+ 'creation_date': d.get('creation_date'),
+ 'restore_point_in_time': d.get('restore_point_in_time'),
+ 'default_secondary_location': d.get('default_secondary_location'),
+ 'catalog_collation': d.get('catalog_collation'),
+ 'create_mode': d.get('create_mode'),
+ 'storage_container_uri': d.get('storage_container_uri'),
+ 'source_database_id': d.get('source_database_id'),
+ 'restorable_dropped_database_id': d.get('restorable_dropped_database_id'),
+ 'storage_container_sas_token': d.get('storage_container_sas_token'),
+ 'recoverable_database_id': d.get('recoverable_database_id'),
+ 'long_term_retention_backup_resource_id': d.get('long_term_retention_backup_resource_id'),
+ 'auto_complete_restore': d.get('auto_complete_restore'),
+ 'last_backup_name': d.get('last_backup_name')
+ }
+ return d
+
+
+def main():
+ AzureRMSqlMIDatabase()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py
new file mode 100644
index 000000000..d6fe211a0
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidatabase_info
+version_added: "2.4.0"
+short_description: Get Azure SQL managed instance database facts
+description:
+ - Get facts of Azure SQL managed instance database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get SQL managed instance database by name
+ azure_rm_sqlmidatabase_info:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+'''
+
+RETURN = '''
+database:
+ description:
+ - A dictionary containing facts for SQL Managed Instance database info.
+ returned: always
+ type: complex
+ contains:
+ auto_complete_restore:
+ description:
+ - Whether to auto complete restore of this managed database.
+ type: bool
+ returned: always
+ sample: null
+ catalog_collation:
+ description:
+ - Collation of the metadata catalog.
+ type: str
+ returned: always
+ sample: null
+ create_mode:
+ description:
+ - Managed database create mode.
+ type: str
+ returned: always
+ sample: null
+ create_date:
+ description:
+ - Creation date of the database.
+ type: str
+ returned: always
+ sample: "2024-05-06T23:59:49.770Z"
+ database_name:
+ description:
+ - The SQL managed instance database name.
+ type: str
+ returned: always
+ sample: fredtest
+ default_secondary_location:
+ description:
+ - Geo paired region.
+ type: str
+ returned: always
+ sample: westus
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxxx/resourceGroups/testRG/providers/Microsoft.Sql/managedInstances/fredsqlmin/databases/fredtest"
+ last_backup_name:
+ description:
+ - Last backup file name for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ location:
+ description:
+ - The resource's location.
+ type: str
+ returned: always
+ sample: eastus
+ long_term_retention_backup_resource_id:
+ description:
+ - The name of the Long Term Retention backup to be used for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ returned: always
+ sample: fredsqlmin
+ recoverable_database_id:
+ description:
+ - The resource identifier of the recoverable database associated with the database.
+ type: str
+ returned: always
+ sample: null
+ resource_group:
+ description:
+ - The resource's resource group.
+ type: str
+ returned: always
+ sample: testRG
+ restorable_dropped_database_id:
+ description:
+ - The restorable dropped database resource id.
+ type: str
+ returned: always
+ sample: null
+ restore_point_in_time:
+ description:
+ - Specifies the point in time (ISO8601 format) of the source database.
+ type: str
+ returned: always
+ sample: null
+ source_database_id:
+ description:
+ - The resource identifier of the source database associated with create operation of this database.
+ type: str
+ returned: always
+ sample: null
+ status:
+ description:
+ - Status of the database.
+ type: str
+ returned: always
+ sample: online
+ storage_container_sas_token:
+ description:
+ - Specifies the storage container sas token.
+ type: str
+ returned: always
+ sample: null
+ storage_container_uri:
+ description:
+ - Specifies the uri of the storage container where backups for this restore are stored.
+ type: str
+ returned: always
+ sample: null
+ tags:
+ description:
+ - The resource's tags
+ type: str
+ returned: always
+ sample: {key1: value1}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances/databases"
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqlMIDatabaseInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True,
+ ),
+ managed_instance_name=dict(
+ type='str',
+ required=True,
+ ),
+ database_name=dict(
+ type='str',
+ ),
+ tags=dict(
+ type='list',
+ elements='str'
+ ),
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.managed_instance_name = None
+ self.database_name = None
+ self.tags = None
+
+ super(AzureRMSqlMIDatabaseInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.database_name is not None:
+ self.results['database'] = self.get()
+ else:
+ self.results['database'] = self.list_by_instance()
+ return self.results
+
+ def list_by_instance(self):
+ response = None
+ results = []
+ try:
+ response = self.sql_client.managed_databases.list_by_instance(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError:
+ self.log('Could not get facts for SQL managed instance database.')
+
+ if response is not None:
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(self.format_item(item))
+ return results
+
+ def get(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.get(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.log('Could not get facts for SQL managed instance database. Exception as {0}'.format(ec))
+
+ if response is not None and self.has_tags(response.tags, self.tags):
+ return [self.format_item(response)]
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'managed_instance_name': self.managed_instance_name,
+ 'database_name': d.get('name'),
+ 'id': d.get('id', None),
+ 'type': d.get('type', None),
+ 'location': d.get('location'),
+ 'tags': d.get('tags'),
+ 'collation': d.get('collation'),
+ 'status': d.get('status'),
+ 'creation_date': d.get('creation_date'),
+ 'earliest_restore_point': d.get('earliest_restore_point'),
+ 'restore_point_in_time': d.get('restore_point_in_time'),
+ 'default_secondary_location': d.get('default_secondary_location'),
+ 'catalog_collation': d.get('catalog_collation'),
+ 'create_mode': d.get('create_mode'),
+ 'storage_container_uri': d.get('storage_container_uri'),
+ 'source_database_id': d.get('source_database_id'),
+ 'restorable_dropped_database_id': d.get('restorable_dropped_database_id'),
+ 'storage_container_sas_token': d.get('storage_container_sas_token'),
+ 'failover_group_id': d.get('failover_group_id'),
+ 'recoverable_database_id': d.get('recoverable_database_id'),
+ 'long_term_retention_backup_resource_id': d.get('long_term_retention_backup_resource_id'),
+ 'auto_complete_restore': d.get('auto_complete_restore'),
+ 'last_backup_name': d.get('last_backup_name')
+ }
+ return d
+
+
+def main():
+ AzureRMSqlMIDatabaseInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py
new file mode 100644
index 000000000..04b05b1c2
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidblongtermretentionpolicy
+version_added: "2.4.0"
+short_description: Manage Azure SQL Managed Instance long-term backup retention
+description:
+ - Manage Azure SQL Managed Instance long-term backup retention.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+ - The name of the SQL managed instance long term retention policy.
+ type: str
+ required: true
+ choices:
+ - default
+ monthly_retention:
+ description:
+ - The monthly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ yearly_retention:
+ description:
+ - The yearly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ weekly_retention:
+ description:
+ - The weekly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ week_of_year:
+ description:
+ - The week of year to take the yearly backup in an ISO 8601 format.
+ type: int
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Update SQL managed instance long term retention policy's retention_days
+ azure_rm_sqlmidblongtermretentionpolicy:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+ monthly_retention: P3M
+ week_of_year: 17
+ weekly_retention: P13W
+ yearly_retention: P6Y
+'''
+
+RETURN = '''
+long_term_retention_policy:
+ description:
+ - A dictionary containing facts for SQL Managed Instance Long Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+ managedInstances/fredsqlmi/databases/newdatabase/backupShortTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+ - SQL managed instance long term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+ type:
+ description:
+ - The SQL managed instance type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ week_of_year:
+ description:
+ - The week of year to take the yearly backup in an ISO 8601 format.
+ type: int
+ sample: 7
+ returned: always
+ weekly_retention:
+ description:
+ - The weekly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P13W
+ returned: always
+ monthly_retention:
+ description:
+ - The monthly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P3M
+ returned: always
+ yearly_retention:
+ description:
+ - The yearly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P6Y
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMILongTermRetentionPolicy(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True,
+ ),
+ managed_instance_name=dict(
+ type='str',
+ required=True,
+ ),
+ database_name=dict(
+ type='str',
+ required=True,
+ ),
+ policy_name=dict(
+ type='str',
+ required=True,
+ choices=['default']
+ ),
+ weekly_retention=dict(
+ type='str',
+ ),
+ monthly_retention=dict(
+ type='str'
+ ),
+ yearly_retention=dict(
+ type='str'
+ ),
+ week_of_year=dict(
+ type='int'
+ )
+ )
+ # store the results of the module operation
+ self.parameters = dict()
+ self.results = dict(
+ changed=False,
+ diff=[]
+ )
+ self.resource_group = None
+ self.managed_instance_name = None
+ self.database_name = None
+ self.policy_name = None
+
+ super(AzureRMSqMILongTermRetentionPolicy, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ else:
+ self.parameters[key] = kwargs.get(key)
+
+ old_response = self.get()
+
+ if old_response is not None:
+ for key in self.parameters.keys():
+ if self.parameters[key] is not None and old_response[key] != self.parameters[key]:
+ self.results['changed'] = True
+ self.results['diff'].append(key)
+ if self.results['changed'] and not self.check_mode:
+ self.results['long_term_retention_policy'] = self.create_or_update_policy()
+ else:
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['long_term_retention_policy'] = self.create_or_update_policy()
+ return self.results
+
+ def get(self):
+ response = None
+ try:
+ response = self.sql_client.managed_instance_long_term_retention_policies.get(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ policy_name=self.policy_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError:
+ self.log('Could not get facts for SQL managed instance long term retention policies.')
+
+ return self.format_item(response) if response is not None else None
+
+ def create_or_update_policy(self):
+ response = None
+ try:
+ response = self.sql_client.managed_instance_long_term_retention_policies.begin_create_or_update(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ policy_name=self.policy_name,
+ parameters=self.parameters)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Could not create SQL managed instance long term retention policies. Exception info as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'managed_instance_name': self.managed_instance_name,
+ 'database_name': self.database_name,
+ 'id': d.get('id', None),
+ 'name': d.get('name', None),
+ 'type': d.get('type', None),
+ "monthly_retention": d.get("monthly_retention"),
+ "week_of_year": d.get("week_of_year"),
+ "weekly_retention": d.get("weekly_retention"),
+ "yearly_retention": d.get("yearly_retention")
+ }
+ return d
+
+
+def main():
+ AzureRMSqMILongTermRetentionPolicy()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py
new file mode 100644
index 000000000..11f7bce16
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidblongtermretentionpolicy_info
+version_added: "2.4.0"
+short_description: Get Azure SQL managed instance long term retention policy facts
+description:
+    - Get facts of Azure SQL managed instance long term retention policies.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+            - The name of the SQL managed instance long term retention policy.
+ type: str
+ choices:
+ - default
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get SQL managed instance long term retention policy by name
+ azure_rm_sqlmidblongtermretentionpolicy_info:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+'''
+
+RETURN = '''
+long_term_retention_policy:
+    description:
+        - A dictionary containing facts for SQL Managed Instance Long Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+ managedInstances/fredsqlmi/databases/newdatabase/backupShortTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+            - SQL managed instance long term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+    type:
+        description:
+            - The SQL managed instance long term retention policy type.
+        type: str
+        returned: always
+        sample: "Microsoft.Sql/managedInstances/databases/backupLongTermRetentionPolicies"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ week_of_year:
+ description:
+ - The week of year to take the yearly backup in an ISO 8601 format.
+ type: int
+ sample: 7
+ returned: always
+ weekly_retention:
+ description:
+ - The weekly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P13W
+ returned: always
+ monthly_retention:
+ description:
+ - The monthly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P3M
+ returned: always
+ yearly_retention:
+ description:
+ - The yearly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P6Y
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMILongTermRetentionPolicyInfo(AzureRMModuleBase):
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True,
+            ),
+            managed_instance_name=dict(
+                type='str',
+                required=True,
+            ),
+            database_name=dict(
+                type='str',
+                required=True,
+            ),
+            policy_name=dict(
+                type='str',
+                choices=['default']
+            ),
+        )
+        # store the results of the module operation
+        self.results = dict(
+            changed=False
+        )
+        self.resource_group = None
+        self.managed_instance_name = None
+        self.database_name = None
+        self.policy_name = None
+
+        super(AzureRMSqMILongTermRetentionPolicyInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+    def exec_module(self, **kwargs):
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+
+        if self.policy_name is not None:  # a specific policy was requested by name
+            self.results['long_term_retention_policy'] = self.get()
+        else:
+            self.results['long_term_retention_policy'] = self.list_by_database()
+        return self.results
+
+    def list_by_database(self):
+        # Enumerate all LTR policies on the database; returns [] when the call fails.
+        response = None
+        try:
+            response = self.sql_client.managed_instance_long_term_retention_policies.list_by_database(resource_group_name=self.resource_group,
+                                                                                                      managed_instance_name=self.managed_instance_name,
+                                                                                                      database_name=self.database_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError:
+            self.log('Could not get facts for SQL managed instance long term retention policies.')
+
+        return [self.format_item(item) for item in response] if response is not None else []
+
+    def get(self):
+        # Fetch a single named LTR policy; returns None when it cannot be retrieved.
+        response = None
+        try:
+            response = self.sql_client.managed_instance_long_term_retention_policies.get(resource_group_name=self.resource_group,
+                                                                                         managed_instance_name=self.managed_instance_name,
+                                                                                         database_name=self.database_name,
+                                                                                         policy_name=self.policy_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            # Log and fall through to the not-found return value; an info module should not abort the play
+            self.log('Could not get facts for SQL managed instance long term retention policies: {0}'.format(ec))
+
+        return [self.format_item(response)] if response is not None else None
+
+    def format_item(self, item):
+        d = item.as_dict()
+        d = {
+            'resource_group': self.resource_group,
+            'managed_instance_name': self.managed_instance_name,
+            'database_name': self.database_name,
+            'id': d.get('id', None),
+            'name': d.get('name', None),
+            'type': d.get('type', None),
+            "monthly_retention": d.get("monthly_retention"),
+            "week_of_year": d.get("week_of_year"),
+            "weekly_retention": d.get("weekly_retention"),
+            "yearly_retention": d.get("yearly_retention")
+        }
+        return d
+
+
+def main():
+ AzureRMSqMILongTermRetentionPolicyInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py
new file mode 100644
index 000000000..477c393bb
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidbshorttermretentionpolicy
+version_added: "2.4.0"
+short_description: Manage SQL Managed Instance database backup short term retention policy
+description:
+ - Manage SQL Managed Instance database backup short term retention policy.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+ - The name of the SQL managed instance short term retention policy.
+ type: str
+ required: true
+ choices:
+ - default
+ retention_days:
+ description:
+ - The backup retention period in days. This is how many days Point-in-Time.
+ type: int
+ default: 7
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Update SQL managed instance short term retention policy's retention_days
+ azure_rm_sqlmidbshorttermretentionpolicy:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+ retention_days: 3
+'''
+
+RETURN = '''
+short_term_retention_policy:
+ description:
+ - A dictionary containing facts for SQL Managed Instance Short Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+ managedInstances/fredsqlmi/databases/newdatabase/backupShortTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+            - SQL managed instance short term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+ type:
+ description:
+ - The SQL managed instance type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ retention_days:
+ description:
+ - The backup retention period in days. This is how many days Point-in-Time.
+ type: int
+ sample: 7
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMIShortTermRetentionPolicy(AzureRMModuleBase):
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True,
+            ),
+            managed_instance_name=dict(
+                type='str',
+                required=True,
+            ),
+            database_name=dict(
+                type='str',
+                required=True,
+            ),
+            policy_name=dict(
+                type='str',
+                required=True,
+                choices=['default']
+            ),
+            retention_days=dict(
+                type='int',
+                default=7
+            ),
+        )
+        # store the results of the module operation
+        self.results = dict(
+            changed=False,
+            diff=[]
+        )
+        self.resource_group = None
+        self.managed_instance_name = None
+        self.database_name = None
+        self.policy_name = None
+        self.retention_days = None
+
+        super(AzureRMSqMIShortTermRetentionPolicy, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)  # NOTE(review): facts_module=True on a mutating module — confirm intended
+
+    def exec_module(self, **kwargs):
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+
+        old_response = self.get()
+
+        if old_response is not None:  # policy exists: update only when retention_days differs
+            if self.retention_days is not None and old_response['retention_days'] != self.retention_days:
+                self.results['changed'] = True
+                self.results['diff'].append('retention_days')
+                if not self.check_mode:
+                    self.results['short_term_retention_policy'] = self.update_policy()
+        else:
+            self.results['changed'] = True
+            if not self.check_mode:
+                self.results['short_term_retention_policy'] = self.create_policy()
+        return self.results
+
+    def get(self):
+        # Fetch the current policy; returns None when it cannot be retrieved.
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.get(resource_group_name=self.resource_group,
+                                                                                        managed_instance_name=self.managed_instance_name,
+                                                                                        database_name=self.database_name,
+                                                                                        policy_name=self.policy_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError:
+            self.log('Could not get facts for SQL managed instance short term retention policies.')
+
+        return self.format_item(response) if response is not None else None
+
+    def update_policy(self):
+        # Long-running update of retention_days; resolved via poller below.
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.begin_update(resource_group_name=self.resource_group,
+                                                                                                 managed_instance_name=self.managed_instance_name,
+                                                                                                 database_name=self.database_name,
+                                                                                                 policy_name=self.policy_name,
+                                                                                                 parameters=dict(retention_days=self.retention_days))
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.fail('Could not update the SQL managed instance short term retention policies. Exception as {0}'.format(ec))
+
+        return self.format_item(self.get_poller_result(response))
+
+    def create_policy(self):
+        # Long-running create-or-update; resolved via poller below.
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.begin_create_or_update(resource_group_name=self.resource_group,
+                                                                                                           managed_instance_name=self.managed_instance_name,
+                                                                                                           database_name=self.database_name,
+                                                                                                           policy_name=self.policy_name,
+                                                                                                           parameters=dict(retention_days=self.retention_days))
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.fail('Could not create the SQL managed instance short term retention policies. Exception as {0}'.format(ec))
+
+        return self.format_item(self.get_poller_result(response))
+
+    def format_item(self, item):
+        d = item.as_dict()
+        d = {
+            'resource_group': self.resource_group,
+            'managed_instance_name': self.managed_instance_name,
+            'database_name': self.database_name,
+            'id': d.get('id', None),
+            'name': d.get('name', None),
+            'type': d.get('type', None),
+            'retention_days': d.get('retention_days', None),
+        }
+        return d
+
+
+def main():
+ AzureRMSqMIShortTermRetentionPolicy()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py
new file mode 100644
index 000000000..b3665b66d
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidbshorttermretentionpolicy_info
+version_added: "2.4.0"
+short_description: Get Azure SQL managed instance short term retention policy
+description:
+ - Get Azure SQL managed instance short term retention policy.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+ - The name of the SQL managed instance short term retention policy.
+ type: str
+ choices:
+ - default
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get SQL managed instance short term retention policy by name
+ azure_rm_sqlmidbshorttermretentionpolicy_info:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+'''
+
+RETURN = '''
+short_term_retention_policy:
+ description:
+ - A dictionary containing facts for SQL Managed Instance Short Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+ managedInstances/fredsqlmi/databases/newdatabase/backupShortTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+            - SQL managed instance short term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+ type:
+ description:
+ - The SQL managed instance short term retention policy type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances/databases/backupShortTermRetentionPolicies"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ retention_days:
+ description:
+            - The backup retention period in days. This is how many days Point-in-Time restore is supported.
+ type: int
+ sample: 7
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMIShortTermRetentionPolicyInfo(AzureRMModuleBase):
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True,
+            ),
+            managed_instance_name=dict(
+                type='str',
+                required=True,
+            ),
+            database_name=dict(
+                type='str',
+                required=True,
+            ),
+            policy_name=dict(
+                type='str',
+                choices=['default']
+            ),
+        )
+        # store the results of the module operation
+        self.results = dict(
+            changed=False
+        )
+        self.resource_group = None
+        self.managed_instance_name = None
+        self.database_name = None
+        self.policy_name = None
+
+        super(AzureRMSqMIShortTermRetentionPolicyInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+    def exec_module(self, **kwargs):
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+
+        if self.policy_name is not None:  # a specific policy was requested by name
+            self.results['short_term_retention_policy'] = self.get()
+        else:
+            self.results['short_term_retention_policy'] = self.list_by_database()
+        return self.results
+
+    def list_by_database(self):
+        # Enumerate all short-term policies on the database; returns [] when the call fails.
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.list_by_database(resource_group_name=self.resource_group,
+                                                                                                     managed_instance_name=self.managed_instance_name,
+                                                                                                     database_name=self.database_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError:
+            self.log('Could not get facts for SQL managed instance short term retention policies.')
+
+        return [self.format_item(item) for item in response] if response is not None else []
+
+    def get(self):
+        # Fetch a single named policy; returns None when it cannot be retrieved.
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.get(resource_group_name=self.resource_group,
+                                                                                        managed_instance_name=self.managed_instance_name,
+                                                                                        database_name=self.database_name,
+                                                                                        policy_name=self.policy_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.log('Could not get facts for SQL managed instance short term retention policies: {0}'.format(ec))
+
+        return [self.format_item(response)] if response is not None else None
+
+    def format_item(self, item):
+        d = item.as_dict()
+        d = {
+            'resource_group': self.resource_group,
+            'managed_instance_name': self.managed_instance_name,
+            'database_name': self.database_name,
+            'id': d.get('id', None),
+            'name': d.get('name', None),
+            'type': d.get('type', None),
+            'retention_days': d.get('retention_days', None),
+        }
+        return d
+
+
+def main():
+ AzureRMSqMIShortTermRetentionPolicyInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py
new file mode 100644
index 000000000..c0efaaf0b
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccountmanagementpolicy
+version_added: "2.4.0"
+short_description: Manage storage account management policies
+description:
+ - Create, update or delete storage account management policies.
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ type: str
+ aliases:
+ - resource_group_name
+ storage_account_name:
+ description:
+ - Name of the storage account.
+ type: str
+ required: true
+ rules:
+ description:
+ - The Storage Account ManagementPolicies Rules.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the policy rule.
+ - A rule name can contain any combination of alpha numeric characters.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the policy rule.
+ type: str
+ required: true
+ choices:
+ - Lifecycle
+ enabled:
+ description:
+ - Whether to enabled the rule
+ type: bool
+ definition:
+ description:
+ - Whether to enabled the rule
+ required: true
+ type: dict
+ suboptions:
+ actions:
+ description:
+ - An object that defines the action set.
+ type: dict
+ required: true
+ suboptions:
+ base_blob:
+ description:
+ - The management policy action for base blob.
+ type: dict
+ suboptions:
+ tier_to_cool:
+ description:
+ - The function to tier blobs to cool storage.
+ - Support blobs currently at Hot tier.
+ type: dict
+ suboptions:
+ days_after_modification_greater_than:
+ description:
+ - Value indicating the age in days after last modification.
+ type: float
+ days_after_last_access_time_greater_than:
+ description:
+ - This property can only be used in conjunction with last access time tracking policy.
+ type: float
+ tier_to_archive:
+ description:
+ - The function to tier blobs to archive storage.
+ - Support blobs currently at Hot or Cool tier.
+ type: dict
+ suboptions:
+ days_after_modification_greater_than:
+ description:
+ - Value indicating the age in days after last modification.
+ type: float
+ days_after_last_access_time_greater_than:
+ description:
+ - This property can only be used in conjunction with last access time tracking policy.
+ type: float
+ delete:
+ description:
+ - The function to delete the blob.
+ type: dict
+ suboptions:
+ days_after_modification_greater_than:
+ description:
+ - Value indicating the age in days after last modification.
+ type: float
+ days_after_last_access_time_greater_than:
+ description:
+ - This property can only be used in conjunction with last access time tracking policy.
+ type: float
+ enable_auto_tier_to_hot_from_cool:
+ description:
+ - This property enables auto tiering of a blob from cool to hot on a blob access.
+ type: bool
+ snapshot:
+ description:
+ - The management policy action for snapshot.
+ type: dict
+ suboptions:
+ tier_to_cool:
+ description:
+ - The function to tier blob snapshot to cool storage.
+ - Support blob snapshot at Hot tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ tier_to_archive:
+ description:
+ - The function to tier blob snapshot to archive storage.
+ - Support blob snapshot currently at Hot or Cool tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ delete:
+ description:
+ - The function to delete the blob snapshot.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ version:
+ description:
+ - The management policy action for version.
+ type: dict
+ suboptions:
+ tier_to_cool:
+ description:
+ - The function to tier blob version to cool storage.
+ - Support blob version currently at Hot tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ tier_to_archive:
+ description:
+ - The function to tier blob version to archive storage.
+ - Support blob version currently at Hot or Cool tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ delete:
+ description:
+ - The function to delete the blob version.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ filters:
+ description:
+ - An object that defines the filter set.
+ type: dict
+ suboptions:
+ prefix_match:
+ description:
+ - An array of strings for prefixes to be match.
+ type: list
+ elements: str
+ blob_types:
+ description:
+ - An array of predefined enum values.
+ - Currently blockBlob supports all tiering and delete actions. Only delete actions are supported for C(appendBlob).
+ type: list
+ required: true
+ elements: str
+ choices:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ description:
+ - An array of blob index tag based filters, there can be at most 10 tag filters.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - This is the filter tag name, it can have 1 - 128 characters.
+ type: str
+ required: true
+ op:
+ description:
+ - This is the comparison operator which is used for object comparison and filtering.
+ - Only C(==) (equality operator) is currently supported.
+ type: str
+ required: true
+ value:
+ description:
+ - This is the filter tag value field used for tag based filtering.
+ - It can have 0-256 characters.
+ type: str
+ required: true
+ state:
+ description:
+ - State of the storage account managed policy. Use C(present) add or update the policy rule.
+ - Use C(absent) to delete all policy rules.
+ default: present
+ type: str
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create storage account management policy with multi parameters
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: testRG
+ storage_account_name: testaccount
+ rules:
+ - name: olcmtest5
+ type: Lifecycle
+ enabled: false
+ definition:
+ actions:
+ base_blob:
+ enable_auto_tier_to_hot_from_cool: true
+ delete:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ tier_to_cool:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ tier_to_archive:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ snapshot:
+ tier_to_cool:
+ days_after_creation_greater_than: 33
+ tier_to_archive:
+ days_after_creation_greater_than: 33
+ delete:
+ days_after_creation_greater_than: 33
+ version:
+ tier_to_archive:
+ days_after_creation_greater_than: 33
+ tier_to_cool:
+ days_after_creation_greater_than: 33
+ delete:
+ days_after_creation_greater_than: 33
+ filters:
+ prefix_match:
+ - olcmtestcontainer2
+ blob_types:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ - name: tags3
+ op: '=='
+ value: value3
+
+- name: Delete management policy rules
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ state: absent
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - The data policy rules associated with the specified storage account.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The data policy's ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Storage/storageAccounts/sttest/managementPolicies/default"
+ resource_group:
+ description:
+ - The resource group name.
+ returned: always
+ type: str
+ sample: testRG
+ storage_account_name:
+ description:
+ - The storage account name.
+ returned: always
+ type: str
+ sample: teststname
+ type:
+ description:
+ - The type of the resource.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts/managementPolicies"
+ last_modified_time:
+ description:
+ - Returns the date and time the ManagementPolicies was last modified.
+ returned: always
+ type: str
+ sample: "2024-04-12T11:40:10.376465+00:00"
+ name:
+ description:
+ - The name of the resource.
+ returned: always
+ type: str
+ sample: DefaultManagementPolicy
+ policy:
+ description:
+ - The Storage Account ManagementPolicy.
+ returned: always
+ type: complex
+ contains:
+ rules:
+ description:
+ - The Storage Account ManagementPolicies Rules.
+ type: list
+ returned: always
+ sample: [
+ {
+ "definition": {
+ "actions": {
+ "base_blob": {
+ "delete": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "enable_auto_tier_to_hot_from_cool": true,
+ "tier_to_archive": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ }
+ },
+ "snapshot": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ },
+ "version": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ }
+ },
+ "filters": {
+ "blob_index_match": [
+ {
+ "name": "tags3",
+ "op": "==",
+ "value": "value3"
+ }
+ ],
+ "blob_types": [
+ "blockBlob",
+ "appendBlob"
+ ],
+ "prefix_match": [
+ "olcmtestcontainer2"
+ ]
+ }
+ },
+ "enabled": false,
+ "name": "olcmtest5",
+ "type": "Lifecycle"
+ }
+ ]
+'''
+
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
+try:
+ from azure.core.exceptions import ResourceNotFoundError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMStorageAccountManagementPolicy(AzureRMModuleBaseExt):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
+ storage_account_name=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ rules=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ enabled=dict(type='bool'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['Lifecycle']),
+ definition=dict(
+ type='dict',
+ required=True,
+ options=dict(
+ actions=dict(
+ type='dict',
+ required=True,
+ options=dict(
+ base_blob=dict(
+ type='dict',
+ options=dict(
+ tier_to_cool=dict(
+ type='dict',
+ options=dict(
+ days_after_modification_greater_than=dict(type='float'),
+ days_after_last_access_time_greater_than=dict(type='float')
+ )
+ ),
+ tier_to_archive=dict(
+ type='dict',
+ options=dict(
+ days_after_modification_greater_than=dict(type='float'),
+ days_after_last_access_time_greater_than=dict(type='float')
+ )
+ ),
+ delete=dict(
+ type='dict',
+ options=dict(
+ days_after_modification_greater_than=dict(type='float'),
+ days_after_last_access_time_greater_than=dict(type='float')
+ )
+ ),
+ enable_auto_tier_to_hot_from_cool=dict(type='bool')
+ )
+ ),
+ snapshot=dict(
+ type='dict',
+ options=dict(
+ tier_to_cool=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(type='float', required=True)
+ )
+ ),
+ tier_to_archive=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(type='float', required=True)
+ )
+ ),
+ delete=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(type='float', required=True)
+ )
+ )
+ )
+ ),
+ version=dict(
+ type='dict',
+ options=dict(
+ tier_to_cool=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(
+ type='float',
+ required=True
+ )
+ )
+ ),
+ tier_to_archive=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(
+ type='float',
+ required=True
+ )
+ )
+ ),
+ delete=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(
+ type='float',
+ required=True
+ )
+ )
+ )
+ )
+ )
+ )
+ ),
+ filters=dict(
+ type='dict',
+ options=dict(
+ prefix_match=dict(type='list', elements='str'),
+ blob_types=dict(type='list', elements='str', choices=['blockBlob', 'appendBlob'], required=True),
+ blob_index_match=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=True),
+ value=dict(type='str', required=True)
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.resource_group = None
+ self.storage_account_name = None
+ self.state = None
+ self.rules = []
+
+ super(AzureRMStorageAccountManagementPolicy, self).__init__(self.module_arg_spec,
+ supports_tags=False,
+ supports_check_mode=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()):
+ setattr(self, key, kwargs[key])
+
+ managed_policy = self.get_management_policy()
+ changed = False
+
+ if self.state == 'present':
+ if managed_policy is not None:
+ rules = []
+ for item in managed_policy['policy']['rules']:
+ rules.append(item)
+ rules_name = [item['name'] for item in rules]
+ for item in self.rules:
+ if item['name'] in rules_name:
+ for tt in managed_policy['policy']['rules']:
+ if item['name'] == tt['name']:
+ old_item = tt
+ if not self.default_compare({}, item, old_item, '', dict(compare=[])):
+ rules.remove(old_item)
+ rules.append(item)
+ changed = True
+ else:
+ rules.append(item)
+ changed = True
+ if changed and not self.check_mode:
+ self.create_or_update_management_policy(rules)
+ else:
+ changed = True
+ if not self.check_mode:
+ self.create_or_update_management_policy(self.rules)
+ else:
+ if managed_policy is not None:
+ changed = True
+ if not self.check_mode:
+ self.delete_management_policy()
+
+ self.results['state'] = self.get_management_policy()
+ self.results['changed'] = changed
+
+ return self.results
+
+ def get_management_policy(self):
+ self.log('Get info for storage account management policy')
+
+ response = None
+ try:
+ response = self.storage_client.management_policies.get(self.resource_group, self.storage_account_name, 'default')
+ except ResourceNotFoundError as ec:
+ self.log("Failed to obtain the storage acount management policy, detail as {0}".format(ec))
+ return None
+ return self.format_to_dict(response)
+
+ def create_or_update_management_policy(self, rules):
+ self.log("Creating or updating storage account mangement policy")
+
+ try:
+ self.storage_client.management_policies.create_or_update(resource_group_name=self.resource_group,
+ account_name=self.storage_account_name,
+ management_policy_name='default',
+ properties=dict(policy=dict(rules=rules)))
+ except Exception as e:
+ self.log('Error creating or updating storage account management policy.')
+ self.fail("Failed to create or updating storage account management policy: {0}".format(str(e)))
+ return self.get_management_policy()
+
+ def delete_management_policy(self):
+ try:
+ self.storage_client.management_policies.delete(self.resource_group, self.storage_account_name, 'default')
+ except Exception as e:
+ self.fail("Failed to delete the storage account management policy: {0}".format(str(e)))
+
+ def format_to_dict(self, obj):
+ result = dict()
+ result['id'] = obj.id
+ result['resource_group'] = self.resource_group
+ result['storage_account_name'] = self.storage_account_name
+ result['name'] = obj.name
+ result['type'] = obj.type
+ result['last_modified_time'] = obj.last_modified_time
+ result['policy'] = dict(rules=[])
+ if obj.policy is not None:
+ result['policy'] = obj.policy.as_dict()
+
+ return result
+
+
+def main():
+ AzureRMStorageAccountManagementPolicy()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py
new file mode 100644
index 000000000..1ffa1d21f
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccountmanagementpolicy_info
+
+version_added: "2.4.0"
+
+short_description: Get the data policy rules associated with the specified storage account
+
+description:
+ - Get the data policy rules associated with the specified storage account.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ type: str
+ aliases:
+ - resource_group_name
+ storage_account_name:
+ description:
+            - Name of the storage account to query.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get the data policy rules associated with the specified storage account
+ azure_rm_storageaccountmanagementpolicy_info:
+ resource_group: myResourceGroup
+ storage_account_name: testaccount
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - The data policy rules associated with the specified storage account.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The data policy's ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Storage/storageAccounts/sttest/managementPolicies/default"
+ resource_group:
+ description:
+ - The resource group name.
+ returned: always
+ type: str
+ sample: testRG
+ storage_account_name:
+ description:
+ - The storage account name.
+ returned: always
+ type: str
+ sample: teststname
+ type:
+ description:
+ - The type of the resource.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts/managementPolicies"
+ last_modified_time:
+ description:
+ - Returns the date and time the ManagementPolicies was last modified.
+ returned: always
+ type: str
+ sample: "2024-04-12T11:40:10.376465+00:00"
+ name:
+ description:
+ - The name of the resource.
+ returned: always
+ type: str
+ sample: DefaultManagementPolicy
+ policy:
+ description:
+ - The Storage Account ManagementPolicy.
+ returned: always
+ type: complex
+ contains:
+ rules:
+ description:
+ - The Storage Account ManagementPolicies Rules.
+ type: list
+ returned: always
+ sample: [
+ {
+ "definition": {
+ "actions": {
+ "base_blob": {
+ "delete": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "enable_auto_tier_to_hot_from_cool": true,
+ "tier_to_archive": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ }
+ },
+ "snapshot": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ },
+ "version": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ }
+ },
+ "filters": {
+ "blob_index_match": [
+ {
+ "name": "tags3",
+ "op": "==",
+ "value": "value3"
+ }
+ ],
+ "blob_types": [
+ "blockBlob",
+ "appendBlob"
+ ],
+ "prefix_match": [
+ "olcmtestcontainer2"
+ ]
+ }
+ },
+ "enabled": false,
+ "name": "olcmtest5",
+ "type": "Lifecycle"
+ }
+ ]
+'''
+
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+try:
+ from azure.core.exceptions import ResourceNotFoundError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMStorageAccountManagementPolicyInfo(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
+ storage_account_name=dict(type='str', required=True),
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.resource_group = None
+ self.storage_account_name = None
+ self.state = None
+ self.rules = None
+
+ super(AzureRMStorageAccountManagementPolicyInfo, self).__init__(self.module_arg_spec,
+ supports_tags=False,
+ supports_check_mode=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()):
+ setattr(self, key, kwargs[key])
+
+ self.results['state'] = self.get_management_policy()
+
+ return self.results
+
+ def get_management_policy(self):
+ self.log('Get info for storage account management policy')
+
+ response = None
+ try:
+ response = self.storage_client.management_policies.get(self.resource_group, self.storage_account_name, 'default')
+ except ResourceNotFoundError as ec:
+ self.log("Failed to obtain the storage acount management policy, detail as {0}".format(ec))
+ return
+
+ return self.format_to_dict(response)
+
+ def format_to_dict(self, obj):
+ result = dict()
+ result['id'] = obj.id
+ result['resource_group'] = self.resource_group
+ result['storage_account_name'] = self.storage_account_name
+ result['name'] = obj.name
+ result['type'] = obj.type
+ result['last_modified_time'] = obj.last_modified_time
+ result['policy'] = dict(rules=[])
+ if obj.policy is not None:
+ result['policy'] = obj.policy.as_dict()
+
+ return result
+
+
+def main():
+ AzureRMStorageAccountManagementPolicyInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py
index e845e2fa1..d11dbd185 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py
@@ -221,6 +221,7 @@ options:
- Type of OS disk caching.
type: str
choices:
+ - None
- ReadOnly
- ReadWrite
aliases:
@@ -270,6 +271,11 @@ options:
description:
- ID of disk encryption set for data disk.
type: str
+ managed_disk_id:
+ description:
+ - The ID of the existing data disk.
+ - If specified, attach mode will be chosen.
+ type: str
managed_disk_type:
description:
- Managed data disk type.
@@ -313,6 +319,7 @@ options:
- Type of data disk caching.
type: str
choices:
+ - None
- ReadOnly
- ReadWrite
public_ip_allocation_method:
@@ -1145,7 +1152,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
storage_account_name=dict(type='str', aliases=['storage_account']),
storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'),
storage_blob_name=dict(type='str', aliases=['storage_blob']),
- os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite']),
+ os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['None', 'ReadOnly', 'ReadWrite']),
os_disk_size_gb=dict(type='int'),
os_disk_encryption_set=dict(type='str'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS', 'StandardSSD_ZRS', 'Premium_LRS', 'Premium_ZRS', 'UltraSSD_LRS']),
@@ -1181,12 +1188,13 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
lun=dict(type='int', required=True),
disk_size_gb=dict(type='int'),
disk_encryption_set=dict(type='str'),
+ managed_disk_id=dict(type='str'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS',
'StandardSSD_ZRS', 'Premium_LRS', 'Premium_ZRS', 'UltraSSD_LRS']),
storage_account_name=dict(type='str'),
storage_container_name=dict(type='str', default='vhds'),
storage_blob_name=dict(type='str'),
- caching=dict(type='str', choices=['ReadOnly', 'ReadWrite'])
+ caching=dict(type='str', choices=['None', 'ReadOnly', 'ReadWrite'])
)
),
plan=dict(type='dict'),
@@ -1965,41 +1973,49 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
count = 0
for data_disk in self.data_disks:
- if not data_disk.get('managed_disk_type'):
- if not data_disk.get('storage_blob_name'):
- data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
- count += 1
+ data_disk_vhd = None
+ disk_name = None
- if data_disk.get('storage_account_name'):
- data_disk_storage_account = self.get_storage_account(self.resource_group, data_disk['storage_account_name'])
- else:
- data_disk_storage_account = self.create_default_storage_account()
- self.log("data disk storage account:")
- self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
-
- if not data_disk.get('storage_container_name'):
- data_disk['storage_container_name'] = 'vhds'
-
- data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
- data_disk_storage_account.name,
- self._cloud_environment.suffixes.storage_endpoint,
- data_disk['storage_container_name'],
- data_disk['storage_blob_name']
- )
-
- if not data_disk.get('managed_disk_type'):
- data_disk_managed_disk = None
- disk_name = data_disk['storage_blob_name']
- data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
+ if data_disk.get('managed_disk_id'):
+ create_option = self.compute_models.DiskCreateOptionTypes.attach
+ data_disk_managed_disk = self.compute_models.ManagedDiskParameters(id=data_disk.get('managed_disk_id'))
else:
- data_disk_vhd = None
- data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
- if data_disk.get('disk_encryption_set'):
- data_disk_managed_disk.disk_encryption_set = self.compute_models.DiskEncryptionSetParameters(
- id=data_disk['disk_encryption_set']
+ create_option = self.compute_models.DiskCreateOptionTypes.empty
+
+ if not data_disk.get('managed_disk_type'):
+ if not data_disk.get('storage_blob_name'):
+ data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
+ count += 1
+
+ if data_disk.get('storage_account_name'):
+ data_disk_storage_account = self.get_storage_account(self.resource_group, data_disk['storage_account_name'])
+ else:
+ data_disk_storage_account = self.create_default_storage_account()
+ self.log("data disk storage account:")
+ self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
+
+ if not data_disk.get('storage_container_name'):
+ data_disk['storage_container_name'] = 'vhds'
+
+ data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
+ data_disk_storage_account.name,
+ self._cloud_environment.suffixes.storage_endpoint,
+ data_disk['storage_container_name'],
+ data_disk['storage_blob_name']
)
- disk_name = self.name + "-datadisk-" + str(count)
- count += 1
+
+ if not data_disk.get('managed_disk_type'):
+ data_disk_managed_disk = None
+ disk_name = data_disk['storage_blob_name']
+ data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
+ else:
+ data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
+ if data_disk.get('disk_encryption_set'):
+ data_disk_managed_disk.disk_encryption_set = self.compute_models.DiskEncryptionSetParameters(
+ id=data_disk['disk_encryption_set']
+ )
+ disk_name = self.name + "-datadisk-" + str(count)
+ count += 1
data_disk['caching'] = data_disk.get(
'caching', 'ReadOnly'
@@ -2010,7 +2026,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
name=disk_name,
vhd=data_disk_vhd,
caching=data_disk['caching'],
- create_option=self.compute_models.DiskCreateOptionTypes.empty,
+ create_option=create_option,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=data_disk_managed_disk,
))
@@ -2941,8 +2957,6 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
pip = self.network_models.PublicIPAddress(id=pip_facts.id, location=pip_facts.location, resource_guid=pip_facts.resource_guid, sku=sku)
self.tags['_own_pip_'] = self.name + '01'
- self.tags['_own_nsg_'] = self.name + '01'
-
parameters = self.network_models.NetworkInterface(
location=self.location,
ip_configurations=[
@@ -2961,6 +2975,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
parameters.network_security_group = self.network_models.NetworkSecurityGroup(id=group.id,
location=group.location,
resource_guid=group.resource_guid)
+ self.tags['_own_nsg_'] = self.name + '01'
parameters.ip_configurations[0].public_ip_address = pip
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py
index 402af0072..97878dcf4 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py
@@ -295,16 +295,35 @@ class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
def format_response(self, item):
d = item.as_dict()
- iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=d.get('instance_id', None)).as_dict()
- power_state = ""
- for index in range(len(iv['statuses'])):
- code = iv['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- break
- d = {
+ instance = None
+ power_state = ''
+ if d.get('provisioning_state', None) is not None:
+ iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
+ vm_scale_set_name=self.vmss_name,
+ instance_id=d.get('instance_id', None)).as_dict()
+ for index in range(len(iv['statuses'])):
+ code = iv['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ break
+ else:
+ try:
+ instance = self.compute_client.virtual_machines.instance_view(self.resource_group, d.get('instance_id', None)).as_dict()
+ vm_instance = self.compute_client.virtual_machines.get(self.resource_group, d.get('instance_id', None)).as_dict()
+ except Exception as exc:
+ self.fail("Getting Flexible VMSS instance instance failed, name {0} instance view - {1}".format(d.get('instance_id'), str(exc)))
+
+ for index in range(len(instance['statuses'])):
+ code = instance['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ elif code[0] == 'OSState' and code[1] == 'generalized':
+ power_state = 'generalized'
+ break
+ elif code[0] == 'ProvisioningState' and code[1] == 'failed':
+ power_state = ''
+ break
+ dd = {
'id': d.get('id'),
'tags': d.get('tags'),
'instance_id': d.get('instance_id'),
@@ -312,7 +331,9 @@ class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
'power_state': power_state,
'protection_policy': d.get('protection_policy')
}
- return d
+ if d.get('provisioning_state') is None:
+ dd['tags'] = vm_instance.get('tags', None)
+ return dd
def main():
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py
index 47a3d3318..ba94461cb 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py
@@ -211,16 +211,35 @@ class AzureRMVirtualMachineScaleSetVMInfo(AzureRMModuleBase):
def format_response(self, item):
d = item.as_dict()
- iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=d.get('instance_id', None)).as_dict()
- power_state = ""
- for index in range(len(iv['statuses'])):
- code = iv['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- break
- d = {
+ instance = None
+ power_state = ''
+ if d.get('provisioning_state') is not None:
+ iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
+ vm_scale_set_name=self.vmss_name,
+ instance_id=d.get('instance_id', None)).as_dict()
+ for index in range(len(iv['statuses'])):
+ code = iv['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ break
+ else:
+ try:
+ instance = self.compute_client.virtual_machines.instance_view(self.resource_group, d.get('instance_id', None)).as_dict()
+ vm_instance = self.compute_client.virtual_machines.get(self.resource_group, d.get('instance_id', None)).as_dict()
+ except Exception as exc:
+ self.fail("Getting Flexible VMSS instance instance failed, name {0} instance view - {1}".format(d.get('instance_id'), str(exc)))
+
+ for index in range(len(instance['statuses'])):
+ code = instance['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ elif code[0] == 'OSState' and code[1] == 'generalized':
+ power_state = 'generalized'
+ break
+ elif code[0] == 'ProvisioningState' and code[1] == 'failed':
+ power_state = ''
+ break
+ dd = {
'resource_group': self.resource_group,
'id': d.get('id', None),
'tags': d.get('tags', None),
@@ -230,10 +249,17 @@ class AzureRMVirtualMachineScaleSetVMInfo(AzureRMModuleBase):
'provisioning_state': d.get('provisioning_state', None),
'power_state': power_state,
'vm_id': d.get('vm_id', None),
- 'image_reference': d.get('storage_profile').get('image_reference', None),
- 'computer_name': d.get('os_profile').get('computer_name', None)
}
- return d
+ if d.get('provisioning_state') is not None:
+ dd['image_reference'] = d.get('storage_profile').get('image_reference', None)
+ dd['computer_name'] = d.get('os_profile').get('computer_name', None)
+ else:
+ dd['image_reference'] = vm_instance.get('storage_profile').get('image_reference', None)
+ dd['computer_name'] = vm_instance.get('os_profile').get('computer_name', None)
+ dd['provisioning_state'] = vm_instance.get('provisioning_state', None)
+ dd['tags'] = vm_instance.get('tags', None)
+ dd['vm_id'] = vm_instance.get('vm_id')
+ return dd
def main():
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py
new file mode 100644
index 000000000..51b7da15e
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_virtualnetworkgatewaynatrule
+
+version_added: "2.4.0"
+
+short_description: Create, update or delete a nat rule of a virtual network gateway
+
+description:
+    - Create, update or delete a nat rule of a virtual network gateway.
+
+options:
+ resource_group:
+ description:
+            - The virtual network gateway's resource group.
+ type: str
+ required: true
+ virtual_network_gateway_name:
+ description:
+            - The name of the virtual network gateway.
+ type: str
+ required: true
+ name:
+ description:
+            - The name of the resource that is unique within a resource group.
+ type: str
+ required: true
+ type_properties_type:
+ description:
+ - The type of NAT rule for VPN NAT.
+ type: str
+ choices:
+ - Dynamic
+ - Static
+ mode:
+ description:
+ - The Source NAT direction of a VPN NAT.
+ type: str
+ choices:
+ - EgressSnat
+ - IngressSnat
+ ip_configuration_id:
+ description:
+ - The IP Configuration ID this NAT rule applies to.
+ type: str
+ external_mappings:
+ description:
+ - The private IP address external mapping for NAT.
+ type: list
+ elements: str
+ internal_mappings:
+ description:
+ - The private IP address internal mapping for NAT.
+ type: list
+ elements: str
+ state:
+ description:
+ - Use C(present) to create or update the virtual network gateway nat rule.
+ - Use C(absent) to delete the nat rule.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create a virtual network nat rule
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ type_properties_type: Dynamic
+ ip_configuration_id: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworkGateways/testRG/ipConfigurations/ipconfig"
+ mode: EgressSnat
+ internal_mappings:
+ - 10.1.0.0/24
+ external_mappings:
+ - 192.168.1.0/24
+
+- name: Delete the virtual network nat rule
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Gets the nat rules for a particular virtual network gateway
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworkGateways/vng01/natRules/natrule"
+ internal_mappings:
+ description:
+ - The private IP address internal mapping for NAT.
+ type: list
+ returned: always
+ sample: ["10.1.0.0/24"]
+ external_mappings:
+ description:
+ - The private IP address external mapping for NAT.
+ type: list
+ returned: always
+ sample: ["192.168.1.0/24"]
+ ip_configuration_id:
+ description:
+            - The IP Configuration ID this NAT rule applies to.
+ type: str
+ returned: always
+ sample: "/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworkGateways/gateway1/ipConfigurations/default"
+ type_properties_type:
+ description:
+ - The type of NAT rule for VPN NAT.
+ type: str
+ returned: always
+ sample: Static
+ mode:
+ description:
+ - The Source NAT direction of a VPN NAT.
+ type: str
+ returned: always
+ sample: EgressSnat
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: natrule_name
+ resource_group:
+ description:
+ - The resource group name.
+ type: str
+ returned: always
+ sample: testRG
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ provisioning_state:
+ description:
+ - The provisioning state of the nat rule resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/virtualNetworkGateways/natRules
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+ from azure.core.polling import LROPoller
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+class AzureRMVirtualNetworkNatGateway(AzureRMModuleBase):
+ """Utility class to get Azure Kubernetes Service Credentials facts"""
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str', required=True),
+ resource_group=dict(type='str', required=True),
+ virtual_network_gateway_name=dict(type='str', required=True),
+ type_properties_type=dict(type='str', choices=['Dynamic', 'Static']),
+ mode=dict(type='str', choices=['EgressSnat', 'IngressSnat']),
+ ip_configuration_id=dict(type='str'),
+ external_mappings=dict(type='list', elements='str'),
+ internal_mappings=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ self.type_properties_type = None
+ self.mode = None
+ self.ip_configuration_id = None
+ self.external_mappings = None
+ self.internal_mappings = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+ required_if = [('type_properties_type', 'Dynamic', ['ip_configuration_id'])]
+
+ super(AzureRMVirtualNetworkNatGateway, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ supports_tags=True,
+ facts_module=False)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec):
+ setattr(self, key, kwargs[key])
+
+ old_response = self.get_nat_rule()
+ changed = False
+ response = None
+
+ if self.state == 'present':
+ if old_response is not None:
+ if self.type_properties_type is not None and self.type_properties_type != old_response['type_properties_type']:
+ self.fail("NAT type_properties_type cannot be changed.")
+ else:
+ self.type_properties_type = old_response['type_properties_type']
+ if self.mode is not None and self.mode != old_response['mode']:
+ self.fail("NAT mode cannot be changed.")
+ else:
+ self.mode = old_response['mode']
+ if self.ip_configuration_id is not None and self.ip_configuration_id != old_response['ip_configuration_id']:
+ changed = True
+ else:
+ self.ip_configuration_id = old_response['ip_configuration_id']
+ if self.internal_mappings is not None and old_response['internal_mappings'] != self.internal_mappings:
+ changed = True
+ else:
+ self.internal_mappings = old_response['internal_mappings']
+
+ if self.external_mappings is not None and self.external_mappings != old_response['external_mappings']:
+ changed = True
+ else:
+ self.external_mappings = old_response['external_mappings']
+ else:
+ changed = True
+
+ internal_mappings = None
+ external_mappings = None
+ if self.internal_mappings is not None:
+ internal_mappings = [self.network_models.VpnNatRuleMapping(address_space=item) for item in self.internal_mappings]
+ if self.external_mappings is not None:
+ external_mappings = [self.network_models.VpnNatRuleMapping(address_space=item) for item in self.external_mappings]
+
+ natrule_resource = self.network_models.VirtualNetworkGatewayNatRule(name=self.name,
+ type_properties_type=self.type_properties_type,
+ mode=self.mode,
+ ip_configuration_id=self.ip_configuration_id,
+ internal_mappings=internal_mappings,
+ external_mappings=external_mappings)
+ if changed:
+ if not self.check_mode:
+ response = self.create_or_update_local_network_gateway(natrule_resource)
+ else:
+ if not self.check_mode:
+ if old_response is not None:
+ self.delete_local_network_gateway()
+ changed = True
+ response = None
+ else:
+ changed = True
+
+ if response is None:
+ response = old_response
+ self.results['state'] = response
+ self.results['changed'] = changed
+ return self.results
+
+ def get_nat_rule(self):
+ """Gets the specified nat rule"""
+ response = None
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.get(self.resource_group, self.virtual_network_gateway_name, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the specified local network gateway in a resource group Failed, Exception as {0}".format(ec))
+ return None
+ return self.format_response(response)
+
+ def create_or_update_local_network_gateway(self, body):
+ """Create or Update local network gateway"""
+ response = None
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.begin_create_or_update(self.resource_group,
+ self.virtual_network_gateway_name, self.name, body)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+ except HttpResponseError as ec:
+ self.fail("Create or Updated a local network gateway in a resource group Failed, Exception as {0}".format(ec))
+
+ return self.format_response(response)
+
+ def delete_local_network_gateway(self):
+ """Deletes the specified local network gateway"""
+ try:
+ self.network_client.virtual_network_gateway_nat_rules.begin_delete(self.resource_group, self.virtual_network_gateway_name, self.name)
+ except HttpResponseError as ec:
+ self.fail("Deletes the specified nat rule, Exception as {0}".format(ec))
+ return None
+
+ def format_response(self, item):
+ result = dict(
+ resource_group=self.resource_group,
+ id=item.id,
+ name=item.name,
+ type=item.type,
+ etag=item.etag,
+ provisioning_state=item.provisioning_state,
+ type_properties_type=item.type_properties_type,
+ mode=item.mode,
+ internal_mappings=list(),
+ external_mappings=list(),
+ ip_configuration_id=item.ip_configuration_id
+ )
+
+ if item.internal_mappings is not None:
+ for value in item.internal_mappings:
+ result['internal_mappings'].append(value.address_space)
+ if item.external_mappings is not None:
+ for value in item.external_mappings:
+ result['external_mappings'].append(value.address_space)
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMVirtualNetworkNatGateway()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py
new file mode 100644
index 000000000..8aeb6649d
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_virtualnetworkgatewaynatrule_info
+
+version_added: "2.4.0"
+
+short_description: Get or list NAT rules for a particular virtual network gateway
+
+description:
+    - Get or list NAT rules for a particular virtual network gateway.
+
+options:
+ resource_group:
+ description:
+            - The virtual network gateway's resource group.
+ type: str
+ required: true
+ virtual_network_gateway_name:
+ description:
+            - The name of the virtual network gateway.
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the nat rule.
+ type: str
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Gets the nat rule by the name
+ azure_rm_virtualnetworkgatewaynatrule_info:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ local_networkgateway_name }}"
+ name: "{{ name }}"
+
+- name: List all nat rules for a particular virtual network gateway
+ azure_rm_virtualnetworkgatewaynatrule_info:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ local_networkgateway_name }}"
+'''
+
+RETURN = '''
+state:
+ description:
+ - Gets the nat rules for a particular virtual network gateway
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworkGateways/vng01/natRules/natrule"
+ internal_mappings:
+ description:
+ - The private IP address internal mapping for NAT.
+ type: list
+ returned: always
+ sample: ["10.1.0.0/24"]
+ external_mappings:
+ description:
+ - The private IP address external mapping for NAT.
+ type: list
+ returned: always
+ sample: ["192.168.1.0/24"]
+ ip_configuration_id:
+ description:
+                - The IP Configuration ID this NAT rule applies to.
+ type: str
+ returned: always
+ sample: "/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworkGateways/gateway1/ipConfigurations/default"
+ type_properties_type:
+ description:
+ - The type of NAT rule for VPN NAT.
+ type: str
+ returned: always
+ sample: Static
+ mode:
+ description:
+ - The Source NAT direction of a VPN NAT.
+ type: str
+ returned: always
+ sample: EgressSnat
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: natrule_name
+ resource_group:
+ description:
+ - The resource group name.
+ type: str
+ returned: always
+ sample: testRG
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ provisioning_state:
+ description:
+ - The provisioning state of the nat rule resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/virtualNetworkGateways/natRules
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+class AzureRMVirtualNetworkGatewayNatRuleInfo(AzureRMModuleBase):
+    """Utility class to get virtual network gateway NAT rule facts"""
+
+ def __init__(self):
+
+ self.module_args = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str', required=True),
+ virtual_network_gateway_name=dict(type='str', required=True),
+ )
+
+ self.name = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+
+ super(AzureRMVirtualNetworkGatewayNatRuleInfo, self).__init__(derived_arg_spec=self.module_args,
+ supports_check_mode=True,
+ supports_tags=False,
+ facts_module=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in self.module_args:
+ setattr(self, key, kwargs[key])
+
+ if self.name is not None:
+ self.results['state'] = self.get_by_name()
+ else:
+ self.results['state'] = self.list_by_virtual_network_gateway()
+
+ return self.results
+
+ def get_by_name(self):
+ """Gets the nat rule by name"""
+ response = None
+
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.get(self.resource_group, self.virtual_network_gateway_name, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the nat rule by name got a Exception, Exception as {0}".format(ec))
+ if response:
+ return [self.format_response(response)]
+ else:
+ return []
+
+ def list_by_virtual_network_gateway(self):
+        """Gets all the NAT rules for the virtual network gateway"""
+ response = None
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.list_by_virtual_network_gateway(self.resource_group,
+ self.virtual_network_gateway_name)
+ except HttpResponseError as ec:
+ self.log("Gets all nat rule by the local network gateway got Exception, Exception as {0}".format(ec))
+
+ if response:
+ return [self.format_response(item) for item in response]
+ else:
+ return []
+
+ def format_response(self, item):
+ result = dict(
+ resource_group=self.resource_group,
+ id=item.id,
+ name=item.name,
+ type=item.type,
+ etag=item.etag,
+ provisioning_state=item.provisioning_state,
+ type_properties_type=item.type_properties_type,
+ mode=item.mode,
+ internal_mappings=list(),
+ external_mappings=list(),
+ ip_configuration_id=item.ip_configuration_id
+ )
+
+ if item.internal_mappings is not None:
+ for value in item.internal_mappings:
+ result['internal_mappings'].append(value.address_space)
+ if item.external_mappings is not None:
+ for value in item.external_mappings:
+ result['external_mappings'].append(value.address_space)
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMVirtualNetworkGatewayNatRuleInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py
new file mode 100644
index 000000000..a367bd692
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024
+# Nir Argaman <nargaman@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: azure_rm_vmsku_info
+
+version_added: "2.4.0"
+
+short_description: Get compute-related SKUs list
+
+description:
+ - Get details for compute-related resource SKUs.
+
+options:
+ location:
+ description:
+ - A region supported by current subscription.
+ type: str
+ resource_type:
+ description:
+ - Resource types e.g. "availabilitySets", "snapshots", "disks", etc.
+ type: str
+ size:
+ description:
+ - Size name, partial name is accepted.
+ type: str
+ zone:
+ description:
+ - Show skus supporting availability zones.
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - Nir Argaman (@nirarg)
+
+'''
+
+EXAMPLES = '''
+- name: Gather Resource Group info
+ azure.azcollection.azure_rm_resourcegroup_info:
+ name: "{{ resource_group }}"
+ register: rg_info
+
+- name: List available VM SKUs
+ azure.azcollection.azure_rm_vmsku_info:
+ location: "{{ rg_info.resourcegroups.0.location }}"
+ resource_type: "virtualMachines"
+ size: "standard_B1"
+ zone: true
+ register: available_skus_result
+'''
+
+RETURN = '''
+available_skus:
+ description:
+ - List of compute-related resource SKUs.
+ returned: always
+ type: complex
+ contains:
+ resource_type:
+ description:
+ - The type of resource the SKU applies to.
+ returned: always
+ type: str
+ sample: "virtual_machine"
+ name:
+ description:
+ - The name of SKU.
+ returned: always
+ type: str
+ sample: "Standard_B1s"
+ tier:
+ description:
+ - Specifies the tier of virtual machines in a scale set.
+ returned: always
+ type: str
+ sample: "Standard"
+ size:
+ description:
+ - The Size of the SKU.
+ returned: always
+ type: str
+ sample: "B1s"
+ family:
+ description:
+ - The Family of this particular SKU.
+ returned: always
+ type: str
+ sample: "standardBSFamily"
+ locations:
+ description:
+ - The set of locations that the SKU is available.
+ returned: always
+ type: list
+ sample: ["eastus"]
+ location_info:
+ description:
+ - A list of locations and availability zones in those locations where the SKU is available.
+ returned: always
+ type: complex
+ contains:
+ location:
+ description:
+ - Location of the SKU.
+ type: str
+ returned: always
+ sample: "eastus"
+ zones:
+ description:
+ - List of availability zones where the SKU is supported.
+ type: list
+ returned: always
+ sample: ["1", "2", "3"]
+ zone_details:
+ description:
+ - Details of capabilities available to a SKU in specific zones.
+ returned: always
+ type: complex
+ contains:
+ capabilities:
+ description:
+ - A list of capabilities that are available for the SKU in the specified list of zones.
+ type: complex
+ returned: always
+ contains:
+ name:
+ description:
+ - An invariant to describe the feature.
+ type: str
+ returned: always
+ sample: "ultrassdavailable"
+ value:
+ description:
+ - An invariant if the feature is measured by quantity.
+ type: str
+ returned: always
+ sample: "True"
+ capabilities:
+ description:
+ - A name value pair to describe the capability.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - An invariant to describe the feature.
+ type: str
+ returned: always
+ sample: "ultrassdavailable"
+ value:
+ description:
+ - An invariant if the feature is measured by quantity.
+ type: str
+ returned: always
+ sample: "True"
+ restrictions:
+ description:
+ - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions.
+ returned: always
+ type: complex
+ contains:
+ type:
+ description:
+ - The type of restrictions.
+ type: str
+ returned: always
+ sample: "location"
+ values:
+ description:
+ - The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted.
+ type: str
+ returned: always
+ sample: ["eastus"]
+ restriction_info:
+ description:
+ - The information about the restriction where the SKU cannot be used.
+ returned: always
+ type: complex
+ contains:
+ locations:
+ description:
+ - Locations where the SKU is restricted.
+ type: list
+ sample: ["location"]
+ zones:
+ description:
+ - List of availability zones where the SKU is restricted.
+ type: list
+ sample: ["1", "2"]
+ reason_code:
+ description:
+ - The reason for restriction.
+ type: str
+ sample: "QuotaId"
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMVmskuInfo(AzureRMModuleBase):
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ location=dict(type='str'),
+ resource_type=dict(type='str'),
+ size=dict(type='str'),
+ zone=dict(type='bool', default=False)
+ )
+
+ self.results = dict(
+ available_skus=[],
+ count=0
+ )
+ self.location = None
+ self.resource_type = None
+ self.size = None
+ self.zone = False
+
+ super(AzureRMVmskuInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def list_skus(self):
+ try:
+ compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2021-07-01')
+ skus_result = compute_client.resource_skus.list()
+ available_skus = []
+ for sku_info in skus_result:
+ if self.location and not _match_location(self.location, sku_info.locations):
+ continue
+ if not _is_sku_available(sku_info, self.zone):
+ continue
+ if self.resource_type and not sku_info.resource_type.lower() == self.resource_type.lower():
+ continue
+ if self.size and not (sku_info.resource_type == 'virtualMachines' and self.size.lower() in sku_info.name.lower()):
+ continue
+ if self.zone and not (sku_info.location_info and sku_info.location_info[0].zones):
+ continue
+ available_skus.append(sku_info.as_dict())
+ return available_skus
+ except HttpResponseError as e:
+ # Handle exceptions
+ raise e
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ available_skus = self.list_skus()
+ self.results['available_skus'] = available_skus
+ self.results['count'] = len(available_skus)
+ return self.results
+
+
+def _match_location(loc, locations):
+ return next((x for x in locations if x.lower() == loc.lower()), None)
+
+
+def _is_sku_available(sku_info, zone):
+ """
+ The SKU is unavailable in the following cases:
+ 1. regional restriction and the region is restricted
+ 2. parameter "zone" is input which indicates only showing skus with availability zones.
+ Meanwhile, zonal restriction and all zones are restricted
+ """
+ is_available = True
+ is_restrict_zone = False
+ is_restrict_location = False
+ if not sku_info.restrictions:
+ return is_available
+ for restriction in sku_info.restrictions:
+ if restriction.reason_code == 'NotAvailableForSubscription':
+ if restriction.type == 'Zone' and not (
+ set(sku_info.location_info[0].zones or []) - set(restriction.restriction_info.zones or [])):
+ is_restrict_zone = True
+ if restriction.type == 'Location' and (
+ sku_info.location_info[0].location in (restriction.restriction_info.locations or [])):
+ is_restrict_location = True
+ if is_restrict_location or (is_restrict_zone and zone):
+ is_available = False
+ break
+ return is_available
+
+
+def main():
+ AzureRMVmskuInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py
index e58cbcd43..622c5dd5a 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py
@@ -213,7 +213,191 @@ options:
- stopped
- restarted
default: started
-
+ site_auth_settings:
+ description:
+ - Configuration settings for the Azure App Service Authentication / Authorization feature.
+ type: dict
+ suboptions:
+ kind:
+ description:
+ - Kind of resource.
+ type: str
+ enabled:
+ description:
+ - Whether enable or disable the Authentication / Authorization feature for the current app.
+ type: bool
+ runtime_version:
+ description:
+ - The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
+ type: str
+ unauthenticated_client_action:
+ description:
+ - The action to take when an unauthenticated client attempts to access the app.
+ type: str
+ choices:
+ - RedirectToLoginPage
+ - AllowAnonymous
+ token_store_enabled:
+ description:
+ - Whether to use App Service Token Store.
+ type: bool
+ allowed_external_redirect_urls:
+ description:
+ - External URLs that can be redirected to as part of logging in or logging out of the app.
+ - Note that the query string part of the URL is ignored.
+ type: list
+ elements: str
+ default_provider:
+ description:
+ - The default authentication provider to use when multiple providers are configured.
+ type: str
+ choices:
+ - AzureActiveDirectory
+ - Facebook
+ - Google
+ - MicrosoftAccount
+ - Twitter
+ - Github
+ token_refresh_extension_hours:
+ description:
+ - The number of hours after session token expiration that a session token can be used to call the token refresh API.
+ type: float
+ client_id:
+ description:
+ - The Client ID of this relying party application, known as the client_id.
+ type: str
+ client_secret:
+ description:
+ - The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
+ type: str
+ client_secret_setting_name:
+ description:
+ - The app setting name that contains the client secret of the relying party application.
+ type: str
+ client_secret_certificate_thumbprint:
+ description:
+ - An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes.
+ - This property acts as a replacement for the Client Secret. It is also optional.
+ type: str
+ issuer:
+ description:
+ - The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
+ type: str
+ allowed_audiences:
+ description:
+ - Allowed audience values to consider when validating JWTs issued by Azure Active Directory.
+ type: list
+ elements: str
+ aad_claims_authorization:
+ description:
+ - Gets a JSON string containing the Azure AD Acl settings.
+ type: str
+ additional_login_params:
+ description:
+ - Login parameters to send to the OpenID Connect authorization endpoint when a user logs in.
+ - Each parameter must be in the form "key=value".
+ type: str
+ google_client_id:
+ description:
+ - The OpenID Connect Client ID for the Google web application.
+ type: str
+ google_client_secret:
+ description:
+ - The client secret associated with the Google web application.
+ type: str
+ google_client_secret_setting_name:
+ description:
+ - The app setting name that contains the client secret associated with the Google web application.
+ type: str
+ google_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
+ - This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
+ type: list
+ elements: str
+ facebook_app_id:
+ description:
+ - The App ID of the Facebook app used for login.
+ type: str
+ facebook_app_secret:
+ description:
+ - The App Secret of the Facebook app used for Facebook Login.
+ type: str
+ facebook_app_secret_setting_name:
+ description:
+ - The app setting name that contains the app secret used for Facebook Login.
+ type: str
+ facebook_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of Facebook for Facebook Login.
+ type: list
+ elements: str
+ git_hub_client_id:
+ description:
+ - The Client Id of the GitHub app used for login.
+ type: str
+ git_hub_client_secret:
+ description:
+ - The Client Secret of the GitHub app used for Github Login.
+ type: str
+ git_hub_client_secret_setting_name:
+ description:
+ - The app setting name that contains the client secret of the Github app used for GitHub Login.
+ type: str
+ git_hub_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
+ - This setting is optional.
+ type: list
+ elements: str
+ twitter_consumer_key:
+ description:
+ - The OAuth 1.0a consumer key of the Twitter application used for sign-in.
+ type: str
+ twitter_consumer_secret:
+ description:
+ - The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
+ - This setting is required for enabling Twitter Sign-In.
+ type: str
+ twitter_consumer_secret_setting_name:
+ description:
+ - The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in.
+ type: str
+ microsoft_account_client_id:
+ description:
+ - The OAuth 2.0 client ID that was created for the app used for authentication.
+ - This setting is required for enabling Microsoft Account authentication.
+ type: str
+ microsoft_account_client_secret:
+ description:
+ - The OAuth 2.0 client secret that was created for the app used for authentication.
+ type: str
+ microsoft_account_client_secret_setting_name:
+ description:
+ - The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+ type: str
+ microsoft_account_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
+ type: list
+ elements: str
+ is_auth_from_file:
+ description:
+ - If I(is_auth_from_file=true), the auth config settings should be read from a file.
+ type: str
+ choices:
+ - 'true'
+ - 'false'
+ auth_file_path:
+ description:
+ - The path of the config file containing auth settings.
+        - If the path is relative, the base will be the site's root directory.
+ type: str
+ config_version:
+ description:
+ - The ConfigVersion of the Authentication / Authorization feature in use for the current app.
+ - The setting in this value can control the behavior of the control plane for Authentication / Authorization.
+ type: str
state:
description:
- State of the Web App.
@@ -337,6 +521,26 @@ EXAMPLES = '''
java_container: "Tomcat"
java_container_version: "8.5"
+- name: Create a windows web app with site_auth_settings
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWindowWebapp
+ site_auth_settings:
+ client_id: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+ default_provider: 'MicrosoftAccount'
+ runtime_version: '-2'
+ token_refresh_extension_hours: 120
+ unauthenticated_client_action: 'RedirectToLoginPage'
+ client_secret: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+ token_store_enabled: false
+ enabled: true
+ is_auth_from_file: false
+ plan:
+ resource_group: myResourceGroup
+ name: myLinuxwebapp
+ is_linux: false
+ sku: S1
+
- name: Create a linux web app with python framework
azure_rm_webapp:
resource_group: myResourceGroup
@@ -360,11 +564,12 @@ id:
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
'''
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
try:
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
+ from azure.core.exceptions import HttpResponseError
from azure.mgmt.web.models import Site, AppServicePlan, SkuDescription, NameValuePair, SiteSourceControl, StringDictionary
except ImportError:
# This is handled in azure_rm_common
@@ -399,6 +604,48 @@ framework_spec = dict(
)
+site_auth_settings_spec = dict(
+ kind=dict(type='str'),
+ enabled=dict(type='bool'),
+ runtime_version=dict(type='str'),
+ unauthenticated_client_action=dict(type='str', choices=["RedirectToLoginPage", "AllowAnonymous"]),
+ token_store_enabled=dict(type='bool'),
+ allowed_external_redirect_urls=dict(type='list', elements='str'),
+ default_provider=dict(type='str', choices=["Facebook", "Google", "MicrosoftAccount", "Twitter", "Github", "AzureActiveDirectory"]),
+ token_refresh_extension_hours=dict(type='float', no_log=True),
+ client_id=dict(type='str'),
+ client_secret=dict(type='str', no_log=True),
+ client_secret_setting_name=dict(type='str'),
+ client_secret_certificate_thumbprint=dict(type='str', no_log=True),
+ issuer=dict(type='str'),
+ allowed_audiences=dict(type='list', elements='str'),
+ additional_login_params=dict(type='str'),
+ aad_claims_authorization=dict(type='str'),
+ google_client_id=dict(type='str'),
+ google_client_secret=dict(type='str', no_log=True),
+ google_client_secret_setting_name=dict(type='str'),
+ google_o_auth_scopes=dict(type='list', elements='str'),
+ facebook_app_id=dict(type='str'),
+ facebook_app_secret=dict(type='str', no_log=True),
+ facebook_app_secret_setting_name=dict(type='str'),
+ facebook_o_auth_scopes=dict(type='list', elements='str'),
+ git_hub_client_id=dict(type='str'),
+ git_hub_client_secret=dict(type='str', no_log=True),
+ git_hub_client_secret_setting_name=dict(type='str'),
+ git_hub_o_auth_scopes=dict(type='list', elements='str'),
+ twitter_consumer_key=dict(type='str', no_log=True),
+ twitter_consumer_secret=dict(type='str', no_log=True),
+ twitter_consumer_secret_setting_name=dict(type='str'),
+ microsoft_account_client_id=dict(type='str'),
+ microsoft_account_client_secret=dict(type='str', no_log=True),
+ microsoft_account_client_secret_setting_name=dict(type='str'),
+ microsoft_account_o_auth_scopes=dict(type='list', elements='str'),
+ is_auth_from_file=dict(type='str', choices=['true', 'false']),
+ auth_file_path=dict(type='str'),
+ config_version=dict(type='str')
+)
+
+
def _normalize_sku(sku):
if sku is None:
return sku
@@ -462,10 +709,10 @@ def webapp_to_dict(webapp):
class Actions:
- CreateOrUpdate, UpdateAppSettings, Delete = range(3)
+ CreateOrUpdate, UpdateAppSettings, UpdateAuthSettings, Delete = range(4)
-class AzureRMWebApps(AzureRMModuleBase):
+class AzureRMWebApps(AzureRMModuleBaseExt):
"""Configuration class for an Azure RM Web App resource"""
def __init__(self):
@@ -536,6 +783,10 @@ class AzureRMWebApps(AzureRMModuleBase):
choices=['started', 'stopped', 'restarted'],
default='started'
),
+ site_auth_settings=dict(
+ type='dict',
+ options=site_auth_settings_spec
+ ),
state=dict(
type='str',
default='present',
@@ -559,6 +810,7 @@ class AzureRMWebApps(AzureRMModuleBase):
self.site_config = dict()
self.app_settings = dict()
self.app_settings_strDic = None
+ self.site_auth_settings = None
# app service plan
self.plan = None
@@ -631,6 +883,7 @@ class AzureRMWebApps(AzureRMModuleBase):
if old_response:
self.results['id'] = old_response['id']
+ self.results['site_auth_settings'] = self.get_auth_settings()
if self.state == 'present':
if not self.plan and not old_response:
@@ -723,6 +976,8 @@ class AzureRMWebApps(AzureRMModuleBase):
to_be_updated = True
self.to_do.append(Actions.CreateOrUpdate)
+ if self.site_auth_settings is not None:
+ self.to_do.append(Actions.UpdateAuthSettings)
self.site.tags = self.tags
# service plan is required for creation
@@ -764,6 +1019,7 @@ class AzureRMWebApps(AzureRMModuleBase):
if update_tags:
to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
# check if root level property changed
if self.is_updatable_property_changed(old_response):
@@ -799,6 +1055,12 @@ class AzureRMWebApps(AzureRMModuleBase):
for key in self.app_settings.keys():
self.app_settings_strDic[key] = self.app_settings[key]
+ if self.site_auth_settings is not None:
+ result = dict(compare=[])
+ if not self.default_compare({}, self.site_auth_settings, self.results['site_auth_settings'], '', dict(compare=[])):
+ to_be_updated = True
+ self.to_do.append(Actions.UpdateAuthSettings)
+
elif self.state == 'absent':
if old_response:
self.log("Delete Web App instance")
@@ -830,6 +1092,12 @@ class AzureRMWebApps(AzureRMModuleBase):
update_response = self.update_app_settings()
self.results['id'] = update_response.id
+ if Actions.UpdateAuthSettings in self.to_do:
+ auth_settings = self.update_auth_settings(self.site_auth_settings)
+ self.results['site_auth_settings'] = auth_settings
+ else:
+ self.results['site_auth_settings'] = self.get_auth_settings()
+
webapp = None
if old_response:
webapp = old_response
@@ -1026,6 +1294,36 @@ class AzureRMWebApps(AzureRMModuleBase):
self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
+ def update_auth_settings(self, body):
+ '''
+ Update web app auth settings
+ :return: deserialized updating response
+ '''
+ self.log("Update web app auth settings")
+
+ try:
+ response = self.web_client.web_apps.update_auth_settings(self.resource_group, self.name, body)
+ self.log("Response : {0}".format(response))
+ return response.as_dict()
+ except HttpResponseError as ex:
+ self.fail("Failed to update web app auth settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def get_auth_settings(self):
+ '''
+ Get the web app auth settings
+ :return: deserialized updating response
+ '''
+ self.log("Get the web app auth settings")
+
+ try:
+ response = self.web_client.web_apps.get_auth_settings(self.resource_group, self.name)
+ self.log("Response : {0}".format(response))
+ return response.as_dict()
+ except HttpResponseError as ex:
+ self.log("Failed to Get web app auth settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
def update_app_settings(self):
'''
Update application settings
@@ -1040,9 +1338,8 @@ class AzureRMWebApps(AzureRMModuleBase):
response = self.web_client.web_apps.update_application_settings(
resource_group_name=self.resource_group, name=self.name, app_settings=settings)
self.log("Response : {0}".format(response))
-
return response
- except Exception as ex:
+ except HttpResponseError as ex:
self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py
index c0ec6b42d..22a5fea73 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py
@@ -244,11 +244,18 @@ webapps:
returned: always
type: dict
sample: { tag1: abc }
+ site_auth_settings:
+ description:
+ - The Authentication / Authorization settings associated with web app.
+ type: dict
+ returned: always
+ sample: {}
'''
try:
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
from azure.mgmt.web.models import CsmPublishingProfileOptions
+ from azure.core.exceptions import HttpResponseError
except Exception:
# This is handled in azure_rm_common
pass
@@ -390,6 +397,14 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
self.fail('Error getting web app {0} publishing credentials - {1}'.format(request_id, str(ex)))
return response
+ def get_auth_settings(self, resource_group, name):
+ self.log('Get web app {0} auth settings'.format(name))
+ try:
+ response = self.web_client.web_apps.get_auth_settings(resource_group_name=resource_group, name=name)
+ return response.as_dict()
+ except HttpResponseError as ex:
+ self.log('Error getting web app {0} auth setting, exception as {1}'.format(name, str(ex)))
+
def get_webapp_ftp_publish_url(self, resource_group, name):
self.log('Get web app {0} app publish profile'.format(name))
@@ -430,6 +445,7 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
app_settings = self.list_webapp_appsettings(resource_group, name)
publish_cred = self.get_publish_credentials(resource_group, name)
ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name)
+ site_auth_settings = self.get_auth_settings(resource_group, name)
except Exception:
pass
return self.construct_curated_webapp(webapp=pip,
@@ -437,7 +453,8 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
app_settings=app_settings,
deployment_slot=None,
ftp_publish_url=ftp_publish_url,
- publish_credentials=publish_cred)
+ publish_credentials=publish_cred,
+ site_auth_settings=site_auth_settings)
def construct_curated_webapp(self,
webapp,
@@ -445,7 +462,8 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
app_settings=None,
deployment_slot=None,
ftp_publish_url=None,
- publish_credentials=None):
+ publish_credentials=None,
+ site_auth_settings=None):
curated_output = dict()
curated_output['id'] = webapp['id']
curated_output['name'] = webapp['name']
@@ -514,6 +532,9 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
if publish_credentials and self.return_publish_profile:
curated_output['publishing_username'] = publish_credentials.publishing_user_name
curated_output['publishing_password'] = publish_credentials.publishing_password
+
+ # curated auth settings
+ curated_output['site_auth_settings'] = site_auth_settings if site_auth_settings is not None else {}
return curated_output
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py
index bfe2b7591..4f44067b1 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py
@@ -39,7 +39,6 @@ options:
- The web app's HTTP access restrictions.
type: list
elements: dict
- default: []
suboptions:
name:
description:
@@ -66,7 +65,28 @@ options:
description:
- IPv4 address (with subnet mask) of the access restriction.
type: str
- required: true
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ choices:
+ - Default
+ - XffProxy
+ - ServiceTag
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network relaste subnet resource id.
+ - Only I(ip_adress) or I(vnet_subnet_resource_id) property must be specified.
+ - Parameter I(vnet_subnet_resource_id) cannot be used with I(subnet_traffic_tag) or I(vnet_traffic_tag) at the same time.
+ type: str
scm_ip_security_restrictions:
description:
- >-
@@ -74,7 +94,6 @@ options:
the SCM restrictions will be configured but not used.
type: list
elements: dict
- default: []
suboptions:
name:
description:
@@ -101,7 +120,28 @@ options:
description:
- IPv4 address (with subnet mask) of the access restriction.
type: str
- required: true
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ choices:
+ - Default
+ - XffProxy
+ - ServiceTag
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network relaste subnet resource id.
+ - Only I(ip_adress) or I(vnet_subnet_resource_id) property must be specified.
+ - Parameter I(vnet_subnet_resource_id) cannot be used with I(subnet_traffic_tag) or I(vnet_traffic_tag) at the same time.
+ type: str
scm_ip_security_restrictions_use_main:
description:
- >-
@@ -131,6 +171,12 @@ EXAMPLES = '''
action: "Allow"
ip_address: "2.2.2.2/24"
priority: 2
+ - name: "Datacenter 3"
+ action: Allow
+ priority: 3
+ description: "for test 02"
+ tag: XffProxy
+ vnet_subnet_resource_id: "{{ subnet_output.state.id }}"
scm_ip_security_restrictions_use_main: true
- name: Delete web app network access restrictions.
@@ -178,6 +224,30 @@ ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network relaste subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions:
description:
- The web app's SCM access restrictions.
@@ -215,6 +285,30 @@ scm_ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network relaste subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions_use_main:
description:
- Whether the HTTP access restrictions are used for SCM access.
@@ -223,7 +317,7 @@ scm_ip_security_restrictions_use_main:
sample: false
'''
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
try:
from azure.mgmt.web.models import IpSecurityRestriction
@@ -236,11 +330,15 @@ ip_restriction_spec = dict(
description=dict(type='str'),
action=dict(type='str', default='Allow', choices=['Allow', 'Deny']),
priority=dict(type='int', required=True),
- ip_address=dict(type='str', required=True),
+ ip_address=dict(type='str'),
+ vnet_subnet_resource_id=dict(type='str'),
+ vnet_traffic_tag=dict(type='int'),
+ subnet_traffic_tag=dict(type='int'),
+ tag=dict(type='str', choices=["Default", "XffProxy", "ServiceTag"]),
)
-class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
+class AzureRMWebAppAccessRestriction(AzureRMModuleBaseExt):
def __init__(self):
@@ -248,8 +346,8 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
name=dict(type='str', required=True),
resource_group=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
- ip_security_restrictions=dict(type='list', default=[], elements='dict', options=ip_restriction_spec),
- scm_ip_security_restrictions=dict(type='list', default=[], elements='dict', options=ip_restriction_spec),
+ ip_security_restrictions=dict(type='list', elements='dict', options=ip_restriction_spec),
+ scm_ip_security_restrictions=dict(type='list', elements='dict', options=ip_restriction_spec),
scm_ip_security_restrictions_use_main=dict(type='bool', default=False),
)
@@ -263,8 +361,8 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
self.state = None
self.name = None
self.resource_group = None
- self.ip_security_restrictions = []
- self.scm_ip_security_restrictions = []
+ self.ip_security_restrictions = None
+ self.scm_ip_security_restrictions = None
self.scm_ip_security_restrictions_use_main = False
super(AzureRMWebAppAccessRestriction, self).__init__(self.module_arg_spec,
@@ -318,9 +416,16 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
return site_config
def has_updates(self, site_config):
- return (site_config.scm_ip_security_restrictions_use_main != self.scm_ip_security_restrictions_use_main or self.ip_security_restrictions and
- self.ip_security_restrictions != self.to_restriction_dict_list(site_config.ip_security_restrictions) or self.scm_ip_security_restrictions and
- self.scm_ip_security_restrictions != self.to_restriction_dict_list(site_config.scm_ip_security_restrictions))
+ changed = False
+ if site_config.scm_ip_security_restrictions_use_main != self.scm_ip_security_restrictions_use_main:
+ changed = True
+ elif not self.default_compare({}, self.ip_security_restrictions,
+ self.to_restriction_dict_list(site_config.ip_security_restrictions), '', dict(compare=[])):
+ changed = True
+ elif not self.default_compare({}, self.scm_ip_security_restrictions,
+ self.to_restriction_dict_list(site_config.scm_ip_security_restrictions), '', dict(compare=[])):
+ changed = True
+ return changed
def has_access_restrictions(self, site_config):
return site_config.ip_security_restrictions or site_config.scm_ip_security_restrictions
@@ -356,6 +461,10 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
action=restriction_dict['action'],
priority=restriction_dict['priority'],
ip_address=restriction_dict['ip_address'],
+ vnet_subnet_resource_id=restriction_dict['vnet_subnet_resource_id'],
+ vnet_traffic_tag=restriction_dict['vnet_traffic_tag'],
+ subnet_traffic_tag=restriction_dict['subnet_traffic_tag'],
+ tag=restriction_dict['tag'],
)
def to_restriction_dict_list(self, restriction_obj_list):
@@ -379,6 +488,10 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
action=restriction_obj.action,
priority=restriction_obj.priority,
ip_address=restriction_obj.ip_address,
+ vnet_subnet_resource_id=restriction_obj.vnet_subnet_resource_id,
+ vnet_traffic_tag=restriction_obj.vnet_traffic_tag,
+ subnet_traffic_tag=restriction_obj.subnet_traffic_tag,
+ tag=restriction_obj.tag,
)
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py
index 2d07bc2de..901d6b806 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py
@@ -79,6 +79,30 @@ ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tags:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network relaste subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions:
description:
- The web app's SCM access restrictions.
@@ -116,6 +140,30 @@ scm_ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network relaste subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions_use_main:
description:
- Whether the HTTP access restrictions are used for SCM access.
@@ -196,6 +244,10 @@ class AzureRMWebAppAccessRestrictionInfo(AzureRMModuleBase):
action=restriction_obj.action,
priority=restriction_obj.priority,
ip_address=restriction_obj.ip_address,
+ vnet_subnet_resource_id=restriction_obj.vnet_subnet_resource_id,
+ vnet_traffic_tag=restriction_obj.vnet_traffic_tag,
+ subnet_traffic_tag=restriction_obj.subnet_traffic_tag,
+ tag=restriction_obj.tag,
)
diff --git a/ansible_collections/azure/azcollection/pr-pipelines.yml b/ansible_collections/azure/azcollection/pr-pipelines.yml
index 7ddcc40dc..744860d8d 100644
--- a/ansible_collections/azure/azcollection/pr-pipelines.yml
+++ b/ansible_collections/azure/azcollection/pr-pipelines.yml
@@ -53,7 +53,6 @@ parameters:
- "azure_rm_containerregistry"
- "azure_rm_containerregistrytag"
- "azure_rm_cosmosdbaccount"
- - "azure_rm_datalakestore"
- "azure_rm_ddosprotectionplan"
- "azure_rm_deployment"
- "azure_rm_diskencryptionset"
@@ -74,6 +73,7 @@ parameters:
- "azure_rm_keyvaultsecret"
- "azure_rm_loadbalancer"
- "azure_rm_loganalyticsworkspace"
+ - "azure_rm_localnetworkgateway"
- "azure_rm_manageddisk"
- "azure_rm_managementgroup"
- "azure_rm_mariadbserver"
@@ -109,6 +109,7 @@ parameters:
- "azure_rm_servicebus"
- "azure_rm_sqlserver"
- "azure_rm_storageaccount"
+ - "azure_rm_storageaccountmanagementpolicy"
- "azure_rm_storageblob"
- "azure_rm_storageshare"
- "azure_rm_subnet"
@@ -122,6 +123,7 @@ parameters:
- "azure_rm_virtualnetwork"
- "azure_rm_virtualnetworkgateway"
- "azure_rm_virtualnetworkpeering"
+ - "azure_rm_virtualnetworkgatewaynatrule"
- "azure_rm_virtualwan"
- "azure_rm_vpnsite"
- "azure_rm_virtualhub"
@@ -137,6 +139,9 @@ parameters:
- "azure_rm_bastionhost"
- "azure_rm_devtestlab"
- "azure_rm_sqlmanagedinstance"
+ - "azure_rm_sqlmidatabase"
+ - "azure_rm_sqlmidblongtermretentionpolicy"
+ - "azure_rm_sqlmidbshorttermretentionpolicy"
- "azure_rm_vmssnetworkinterface_info"
- "azure_rm_sshpublickey"
- "inventory_azure"
diff --git a/ansible_collections/azure/azcollection/requirements-azure.txt b/ansible_collections/azure/azcollection/requirements.txt
index 5fc0064aa..557c59e47 100644
--- a/ansible_collections/azure/azcollection/requirements-azure.txt
+++ b/ansible_collections/azure/azcollection/requirements.txt
@@ -9,13 +9,12 @@ azure-mgmt-authorization==2.0.0
azure-mgmt-apimanagement==3.0.0
azure-mgmt-batch==16.2.0
azure-mgmt-cdn==11.0.0
-azure-mgmt-compute==26.1.0
+azure-mgmt-compute==30.6.0
azure-mgmt-containerinstance==9.0.0
-azure-mgmt-core==1.3.0
+azure-mgmt-core==1.4.0
azure-mgmt-containerregistry==9.1.0
azure-containerregistry==1.1.0
azure-mgmt-containerservice==20.0.0
-azure-mgmt-datalake-store==1.0.0
azure-mgmt-datafactory==2.0.0
azure-mgmt-dns==8.0.0
azure-mgmt-marketplaceordering==1.1.0
@@ -27,7 +26,7 @@ azure-mgmt-nspkg==2.0.0
azure-mgmt-privatedns==1.0.0
azure-mgmt-redis==13.0.0
azure-mgmt-resource==21.1.0
-azure-mgmt-rdbms==10.0.0
+azure-mgmt-rdbms==10.2.0b12
azure-mgmt-search==8.0.0
azure-mgmt-servicebus==7.1.0
azure-mgmt-sql==3.0.1
diff --git a/ansible_collections/azure/azcollection/sanity-requirements-azure.txt b/ansible_collections/azure/azcollection/sanity-requirements.txt
index 65bf3ad16..65bf3ad16 100644
--- a/ansible_collections/azure/azcollection/sanity-requirements-azure.txt
+++ b/ansible_collections/azure/azcollection/sanity-requirements.txt
diff --git a/ansible_collections/azure/azcollection/tests/integration/requirements.txt b/ansible_collections/azure/azcollection/tests/integration/requirements.txt
new file mode 100644
index 000000000..557c59e47
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/requirements.txt
@@ -0,0 +1,51 @@
+packaging
+requests[security]
+xmltodict
+msgraph-sdk==1.0.0
+azure-cli-core==2.34.0
+azure-common==1.1.11
+azure-identity==1.14.0
+azure-mgmt-authorization==2.0.0
+azure-mgmt-apimanagement==3.0.0
+azure-mgmt-batch==16.2.0
+azure-mgmt-cdn==11.0.0
+azure-mgmt-compute==30.6.0
+azure-mgmt-containerinstance==9.0.0
+azure-mgmt-core==1.4.0
+azure-mgmt-containerregistry==9.1.0
+azure-containerregistry==1.1.0
+azure-mgmt-containerservice==20.0.0
+azure-mgmt-datafactory==2.0.0
+azure-mgmt-dns==8.0.0
+azure-mgmt-marketplaceordering==1.1.0
+azure-mgmt-monitor==3.0.0
+azure-mgmt-managedservices==6.0.0
+azure-mgmt-managementgroups==1.0.0
+azure-mgmt-network==19.1.0
+azure-mgmt-nspkg==2.0.0
+azure-mgmt-privatedns==1.0.0
+azure-mgmt-redis==13.0.0
+azure-mgmt-resource==21.1.0
+azure-mgmt-rdbms==10.2.0b12
+azure-mgmt-search==8.0.0
+azure-mgmt-servicebus==7.1.0
+azure-mgmt-sql==3.0.1
+azure-mgmt-storage==19.0.0
+azure-mgmt-trafficmanager==1.0.0b1
+azure-mgmt-web==6.1.0
+azure-nspkg==2.0.0
+azure-storage-blob==12.11.0
+azure-core==1.28.0
+azure-keyvault==4.2.0
+azure-mgmt-keyvault==10.0.0
+azure-mgmt-cosmosdb==6.4.0
+azure-mgmt-hdinsight==9.0.0
+azure-mgmt-devtestlabs==9.0.0
+azure-mgmt-loganalytics==12.0.0
+azure-mgmt-automation==1.0.0
+azure-mgmt-iothub==2.2.0
+azure-iot-hub==2.6.1
+azure-mgmt-recoveryservices==2.0.0
+azure-mgmt-recoveryservicesbackup==3.0.0
+azure-mgmt-notificationhubs==7.0.0
+azure-mgmt-eventhub==10.1.0
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml
index d19e0b8cd..86a06c675 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_adapplication/tasks/main.yml
@@ -21,7 +21,7 @@
ansible.builtin.assert:
that: not output.changed
-- name: Create application with more parameter
+- name: Create application with more parameters
azure_rm_adapplication:
display_name: "{{ display_name }}-01"
sign_in_audience: AzureADandPersonalMicrosoftAccount
@@ -37,6 +37,16 @@
display_name: "{{ display_name }}_approle"
is_enabled: true
value: Password@0329
+ optional_claims:
+ access_token_claims:
+ - name: aud
+ essential: true
+ id_token_claims:
+ - name: acct
+ essential: true
+ saml2_token_claims:
+ - name: acct
+ essential: true
register: second_output
- name: Assert secondary resource create success
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml
index c02a263bb..24e686e52 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aduser/tasks/main.yml
@@ -1,35 +1,50 @@
- name: Prepare facts
ansible.builtin.set_fact:
- user_id: "user{{ 999999999999999999994 | random | to_uuid }}@contoso.com"
- object_id: "{{ 999999999999999999994 | random | to_uuid }}"
- user_principal_name: "{{ 999999999999999999994 | random | to_uuid }}"
+ user_name: "test_user_{{ 999999999999999999994 | random | to_uuid }}"
+ on_premises_immutable_id: "{{ 999999999999999999994 | random | to_uuid }}"
+ password_profile: "{{ lookup('community.general.random_string', length=12, min_lower=1, min_upper=1, min_special=1, min_numeric=1) }}"
+ domain: change_me.com
run_once: true
- name: Create test user
azure_rm_aduser:
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
state: "present"
account_enabled: true
- display_name: "Test_{{ user_principal_name }}_Display_Name"
- password_profile: "password"
- mail_nickname: "Test_{{ user_principal_name }}_mail_nickname"
- immutable_id: "{{ object_id }}"
+ display_name: "{{ user_name }}_display_name"
+ password_profile: "{{ password_profile }}"
+ mail_nickname: "{{ user_name }}_mail_nickname"
+ on_premises_immutable_id: "{{ on_premises_immutable_id }}"
given_name: "First"
surname: "Last"
user_type: "Member"
usage_location: "US"
- mail: "{{ user_principal_name }}@contoso.com"
+ mail: "{{ user_name }}@{{ domain }}"
+ company_name: "Test Company"
+ on_premises_extension_attributes:
+ extension_attribute1: "test_extension_attribute1"
+ extension_attribute2: "test_extension_attribute2"
+ extension_attribute11: "test_extension_attribute11"
register: create_user_should_pass
- name: Try to update existing user - idempotent check
azure_rm_aduser:
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
state: "present"
- display_name: "Test_{{ user_principal_name }}_Display_Name"
- mail_nickname: "Test_{{ user_principal_name }}_mail_nickname"
+ account_enabled: true
+ display_name: "{{ user_name }}_display_name"
+ mail_nickname: "{{ user_name }}_mail_nickname"
+ on_premises_immutable_id: "{{ on_premises_immutable_id }}"
given_name: "First"
surname: "Last"
- mail: "{{ user_principal_name }}@contoso.com"
+ user_type: "Member"
+ usage_location: "US"
+ mail: "{{ user_name }}@{{ domain }}"
+ company_name: "Test Company"
+ on_premises_extension_attributes:
+ extension_attribute1: "test_extension_attribute1"
+ extension_attribute2: "test_extension_attribute2"
+ extension_attribute11: "test_extension_attribute11"
register: attempted_update_with_no_changes_should_pass
- name: Assert Nothing Changed
@@ -39,42 +54,49 @@
- name: User_principal_name Should Pass
azure_rm_aduser_info:
- user_principal_name: "{{ user_id }}"
- register: get_user_should_pass
+ user_principal_name: "{{ user_name }}@{{ domain }}"
+ register: get_user_by_upn_should_pass
+
+- name: Attribute_name mail Should Pass
+ azure_rm_aduser_info:
+ attribute_name: "mail"
+ attribute_value: "{{ user_name }}@{{ domain }}"
+ register: get_user_by_mail_should_pass
- name: Assert user was created and account is enabled
ansible.builtin.assert:
that:
- - "create_user_should_pass['ad_users'][0]['account_enabled'] == True"
- - "get_user_should_pass['ad_users'][0]['account_enabled'] == True"
+ - "create_user_should_pass['ad_user']['account_enabled'] == True"
+ - "get_user_by_upn_should_pass['ad_users'][0]['account_enabled'] == True"
+ - "get_user_by_mail_should_pass['ad_users'][0]['account_enabled'] == True"
- name: Update test user
azure_rm_aduser:
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
state: "present"
account_enabled: false
register: update_user_should_pass
- name: User_principal_name on updated user Should Pass
azure_rm_aduser_info:
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
register: get_updated_user_should_pass
- name: Assert user was updated and account is disabled
ansible.builtin.assert:
that:
- - "update_user_should_pass['ad_users'][0]['account_enabled'] == False"
+ - "update_user_should_pass['ad_user']['account_enabled'] == False"
- "get_updated_user_should_pass['ad_users'][0]['account_enabled'] == False"
- name: Delete test user
azure_rm_aduser:
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
state: "absent"
register: delete_user_should_pass
- name: User_principal_name Should Fail
azure_rm_aduser_info:
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
register: get_user_should_fail
ignore_errors: true
@@ -91,19 +113,19 @@
- name: Assert task failed
ansible.builtin.assert:
that:
- - "missing_any_identifiers is undefined"
+ - "missing_any_identifiers is defined"
- name: Too many identifiers Should Fail
azure_rm_aduser_info:
- user_principal_name: "{{ user_id }}"
- object_id: "{{ object_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
+ object_id: "{{ on_premises_immutable_id }}"
register: too_many_identifiers
ignore_errors: true
- name: Assert task failed
ansible.builtin.assert:
that:
- - "too_many_identifiers is undefined"
+ - "too_many_identifiers is defined"
- name: Missing attribute_value Should Fail
azure_rm_aduser_info:
@@ -114,27 +136,27 @@
- name: Assert task failed
ansible.builtin.assert:
that:
- - "missing_attribute_value is undefined"
+ - "missing_attribute_value is defined"
- name: Missing attribute_name Should Fail
azure_rm_aduser_info:
- attribute_value: SMTP:user@contoso.com
+ attribute_value: SMTP:user@stadtluzern.ch
register: missing_attribute_name
ignore_errors: true
- name: Assert task failed
ansible.builtin.assert:
that:
- - "missing_attribute_name is undefined"
+ - "missing_attribute_name is defined"
- name: Using all with principal name should fail
azure_rm_aduser_info:
all: true
- user_principal_name: "{{ user_id }}"
+ user_principal_name: "{{ user_name }}@{{ domain }}"
register: using_all_with_principal_name
ignore_errors: true
- name: Assert task failed
ansible.builtin.assert:
that:
- - "using_all_with_principal_name is undefined"
+ - "using_all_with_principal_name is defined"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml
index ff60ca681..d3377384c 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/main.yml
@@ -1,21 +1,46 @@
+- name: Gather Resource Group info
+ azure.azcollection.azure_rm_resourcegroup_info:
+ name: "{{ resource_group }}"
+ register: __rg_info
+
- name: Set varialbles
ansible.builtin.set_fact:
rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}"
noderpfx: "{{ resource_group | hash('md5') | truncate(4, True, '') }}"
+ location: "{{ __rg_info.resourcegroups.0.location }}"
+
+- name: Create User Managed Identity
+ azure_rm_resource:
+ resource_group: "{{ resource_group }}"
+ provider: ManagedIdentity
+ resource_type: userAssignedIdentities
+ resource_name: "{{ item }}"
+ api_version: "2023-01-31"
+ body:
+ location: "{{ location }}"
+ state: present
+ loop:
+ - "ansible-test-aks-identity"
+ - "ansible-test-aks-identity-2"
+
+- name: Set identities IDs to test. Identities ansible-test-aks-identity and ansible-test-aks-identity-2 have to be created previously
+ ansible.builtin.set_fact:
+ user_identity: "/subscriptions/{{ azure_subscription_id }}/resourcegroups/{{ resource_group }}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/ansible-test-aks-identity"
+ user_identity_2: "/subscriptions/{{ azure_subscription_id }}/resourcegroups/{{ resource_group }}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/ansible-test-aks-identity-2"
- name: Include aks tasks
ansible.builtin.include_tasks: minimal-cluster.yml
- name: Find available k8s version
azure_rm_aksversion_info:
- location: eastus
+ location: "{{ location }}"
register: versions
- name: Create an AKS instance (check mode)
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
service_principal:
@@ -56,7 +81,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
service_principal:
@@ -107,7 +132,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
service_principal:
@@ -167,7 +192,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
service_principal:
@@ -198,7 +223,7 @@
- name: Get available version
azure_rm_aksversion_info:
- location: eastus
+ location: "{{ location }}"
version: "{{ versions.azure_aks_versions[0] }}"
register: version1
@@ -206,7 +231,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -246,7 +271,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -282,7 +307,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -323,7 +348,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -360,7 +385,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -408,7 +433,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -449,7 +474,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -497,7 +522,7 @@
azure_rm_aks:
name: "aks{{ rpfx }}"
resource_group: "{{ resource_group }}"
- location: eastus
+ location: "{{ location }}"
dns_prefix: "aks{{ rpfx }}"
kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
service_principal:
@@ -576,3 +601,15 @@
ansible.builtin.assert:
that:
- "fact.aks | length == 0"
+
+- name: Destroy User Managed Identity
+ azure_rm_resource:
+ resource_group: "{{ resource_group }}"
+ provider: ManagedIdentity
+ resource_type: userAssignedIdentities
+ resource_name: "{{ item }}"
+ api_version: "2023-01-31"
+ state: absent
+ loop:
+ - "ansible-test-aks-identity"
+ - "ansible-test-aks-identity-2"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml
index 79362b384..3c12fe9b3 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aks/tasks/minimal-cluster.yml
@@ -4,13 +4,13 @@
- name: Find available k8s version
azure_rm_aksversion_info:
- location: eastus
+ location: "{{ location }}"
register: versions
- name: Use minimal parameters and system-assigned identity
azure_rm_aks:
name: "minimal{{ rpfx }}"
- location: eastus
+ location: "{{ location }}"
resource_group: "{{ resource_group }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
dns_prefix: "aks{{ rpfx }}"
@@ -55,11 +55,13 @@
- name: Use minimal parameters and system-assigned identity (idempotent)
azure_rm_aks:
name: "minimal{{ rpfx }}"
- location: eastus
+ location: "{{ location }}"
resource_group: "{{ resource_group }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
dns_prefix: "aks{{ rpfx }}"
enable_rbac: true
+ identity:
+ type: "SystemAssigned"
aad_profile:
managed: true
agent_pool_profiles:
@@ -83,10 +85,142 @@
that:
- not output.changed
+- name: Use minimal parameters and user-assigned identity
+ azure_rm_aks:
+ name: "minimal{{ rpfx }}"
+ location: "{{ location }}"
+ resource_group: "{{ resource_group }}"
+ kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
+ dns_prefix: "aks{{ rpfx }}"
+ enable_rbac: true
+ identity:
+ type: "UserAssigned"
+ user_assigned_identities: "{{ user_identity }}"
+ aad_profile:
+ managed: true
+ agent_pool_profiles:
+ - name: default
+ count: 1
+ vm_size: Standard_B2s
+ mode: System
+ api_server_access_profile:
+ authorized_ip_ranges:
+ - "192.0.2.0"
+ - "198.51.100.0"
+ - "203.0.113.0"
+ enable_private_cluster: false
+ network_profile:
+ load_balancer_sku: standard
+ outbound_type: loadBalancer
+ register: output
+
+- name: Assert the AKS instance is well created
+ ansible.builtin.assert:
+ that:
+ - output.changed
+ - output.provisioning_state == 'Succeeded'
+
+- name: Get AKS fact
+ azure_rm_aks_info:
+ name: "minimal{{ rpfx }}"
+ resource_group: "{{ resource_group }}"
+ register: fact
+
+- name: Assert fact returns the created one
+ ansible.builtin.assert:
+ that:
+ - "fact.aks | length == 1"
+ - fact.aks[0].id == output.id
+ - fact.aks[0].aad_profile.managed == true
+ - user_identity in fact.aks[0].identity.user_assigned_identities
+
+- name: Use minimal parameters and user-assigned identity (idempotent)
+ azure_rm_aks:
+ name: "minimal{{ rpfx }}"
+ location: "{{ location }}"
+ resource_group: "{{ resource_group }}"
+ kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
+ dns_prefix: "aks{{ rpfx }}"
+ enable_rbac: true
+ identity:
+ type: "UserAssigned"
+ user_assigned_identities: "{{ user_identity }}"
+ aad_profile:
+ managed: true
+ agent_pool_profiles:
+ - name: default
+ count: 1
+ vm_size: Standard_B2s
+ mode: System
+ api_server_access_profile:
+ authorized_ip_ranges:
+ - "192.0.2.0"
+ - "198.51.100.0"
+ - "203.0.113.0"
+ enable_private_cluster: false
+ network_profile:
+ load_balancer_sku: standard
+ outbound_type: loadBalancer
+ register: output
+
+- name: Assert idempotent
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+
+- name: Use minimal parameters and user-assigned 2 identity
+ azure_rm_aks:
+ name: "minimal{{ rpfx }}"
+ location: "{{ location }}"
+ resource_group: "{{ resource_group }}"
+ kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
+ dns_prefix: "aks{{ rpfx }}"
+ enable_rbac: true
+ identity:
+ type: "UserAssigned"
+ user_assigned_identities: "{{ user_identity_2 }}"
+ aad_profile:
+ managed: true
+ agent_pool_profiles:
+ - name: default
+ count: 1
+ vm_size: Standard_B2s
+ mode: System
+ api_server_access_profile:
+ authorized_ip_ranges:
+ - "192.0.2.0"
+ - "198.51.100.0"
+ - "203.0.113.0"
+ enable_private_cluster: false
+ network_profile:
+ load_balancer_sku: standard
+ outbound_type: loadBalancer
+ register: output
+
+- name: Assert the AKS instance is well created
+ ansible.builtin.assert:
+ that:
+ - output.changed
+ - output.provisioning_state == 'Succeeded'
+
+- name: Get AKS fact
+ azure_rm_aks_info:
+ name: "minimal{{ rpfx }}"
+ resource_group: "{{ resource_group }}"
+ register: fact
+
+- name: Assert fact returns the created one
+ ansible.builtin.assert:
+ that:
+ - "fact.aks | length == 1"
+ - fact.aks[0].id == output.id
+ - fact.aks[0].aad_profile.managed == true
+ - user_identity_2 in fact.aks[0].identity.user_assigned_identities
+
- name: Update api_server_access_profile config
azure_rm_aks:
name: "minimal{{ rpfx }}"
- location: eastus
+ location: "{{ location }}"
resource_group: "{{ resource_group }}"
kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
dns_prefix: "aks{{ rpfx }}"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml
index 7c255f9ad..20a5a8e76 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_aksagentpool/tasks/main.yml
@@ -7,6 +7,25 @@
location: eastus
register: versions
+- name: Create proximity placement group
+ azure_rm_proximityplacementgroup:
+ resource_group: "{{ resource_group }}"
+ name: "proxi{{ rpfx }}"
+ register: proxi_output
+
+- name: Create public ip prefix
+ azure_rm_publicipprefix:
+ resource_group: "{{ resource_group }}"
+ name: "pipprefix{{ rpfx }}"
+ zones:
+ - 1
+ public_ip_address_version: IPV4
+ prefix_length: 29
+ sku:
+ name: Standard
+ tier: Regional
+ register: pip_output
+
- name: Create a kubernet service with minimal parameters
azure_rm_aks:
name: "min{{ rpfx }}"
@@ -24,6 +43,7 @@
mode: System
api_server_access_profile:
authorized_ip_ranges:
+ - "{{ pip_output.state.ip_prefix }}"
- "192.0.2.0"
- "198.51.100.0"
- "203.0.113.0"
@@ -194,6 +214,97 @@
- output.aks_agent_pools[0].max_pods == 42
- output.aks_agent_pools[0].orchestrator_version == agentpool_version.azure_orchestrator_version[0]
+- name: Create a new agent pool with multi parameters
+ azure_rm_aksagentpool:
+ resource_group: "{{ resource_group }}"
+ cluster_name: "min{{ rpfx }}"
+ name: default06
+ count: 1
+ vm_size: Standard_B2s
+ type_properties_type: VirtualMachineScaleSets
+ mode: System
+ node_labels: {"release":"stable"}
+ max_pods: 42
+ enable_auto_scaling: true
+ min_count: 1
+ max_count: 10
+ orchestrator_version: "{{ agentpool_version.azure_orchestrator_version[0] }}"
+ availability_zones:
+ - 1
+ kubelet_disk_type: OS
+ workload_runtime: OCIContainer
+ os_sku: Ubuntu
+ scale_down_mode: Delete
+ upgrade_settings:
+ max_surge: 50%
+ power_state:
+ code: Running
+ enable_node_public_ip: true
+ scale_set_priority: Regular
+ node_public_ip_prefix_id: "{{ pip_output.state.id }}"
+ spot_max_price: 85
+ proximity_placement_group_id: "{{ proxi_output.state.id }}"
+ enable_encryption_at_host: false
+ enable_ultra_ssd: false
+ enable_fips: true
+ tags:
+ key2: value2
+ register: output
+
+- name: Assert the node agent pool create well
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Create a new agent pool with multi parameters( Idempotent test)
+ azure_rm_aksagentpool:
+ resource_group: "{{ resource_group }}"
+ cluster_name: "min{{ rpfx }}"
+ name: default06
+ count: 1
+ vm_size: Standard_B2s
+ type_properties_type: VirtualMachineScaleSets
+ mode: System
+ node_labels: {"release":"stable"}
+ max_pods: 42
+ enable_auto_scaling: true
+ min_count: 1
+ max_count: 10
+ orchestrator_version: "{{ agentpool_version.azure_orchestrator_version[0] }}"
+ availability_zones:
+ - 1
+ kubelet_disk_type: OS
+ workload_runtime: OCIContainer
+ os_sku: Ubuntu
+ scale_down_mode: Delete
+ upgrade_settings:
+ max_surge: 50%
+ power_state:
+ code: Running
+ enable_node_public_ip: true
+ scale_set_priority: Regular
+ node_public_ip_prefix_id: "{{ pip_output.state.id }}"
+ spot_max_price: 85
+ proximity_placement_group_id: "{{ proxi_output.state.id }}"
+ enable_encryption_at_host: false
+ enable_ultra_ssd: false
+ enable_fips: true
+ tags:
+ key2: value2
+ register: output
+
+- name: Assert the node agent pool no change
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+
+- name: Delete node agent pool
+ azure_rm_aksagentpool:
+ resource_group: "{{ resource_group }}"
+ cluster_name: "min{{ rpfx }}"
+ name: default06
+ state: absent
+
- name: Delete node agent pool
azure_rm_aksagentpool:
resource_group: "{{ resource_group }}"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml
index bcc19ced0..0b8c3edfa 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cognitivesearch/tasks/main.yml
@@ -139,7 +139,7 @@
hosting_mode: default
identity: SystemAssigned
network_rule_set:
- - 8.8.8.8/31
+ - 8.8.8.8
- 1.1.1.1
partition_count: 2
public_network_access: disabled
@@ -171,7 +171,7 @@
hosting_mode: default
identity: SystemAssigned
network_rule_set:
- - 8.8.8.8/31
+ - 8.8.8.8
- 1.1.1.1
partition_count: 2
public_network_access: disabled
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml
index 5e1f3f4d8..64aaef57c 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml
@@ -29,9 +29,9 @@
azure_rm_cosmosdbaccount:
resource_group: "{{ resource_group }}"
name: "{{ dbname }}"
- location: eastasia
+ location: eastus
geo_rep_locations:
- - name: eastasia
+ - name: eastus
failover_priority: 0
database_account_offer_type: Standard
check_mode: true
@@ -45,10 +45,10 @@
azure_rm_cosmosdbaccount:
resource_group: "{{ resource_group }}"
name: "{{ dbname }}"
- location: eastasia
+ location: eastus
kind: global_document_db
geo_rep_locations:
- - name: eastasia
+ - name: eastus
failover_priority: 0
- name: westus
failover_priority: 1
@@ -70,10 +70,10 @@
azure_rm_cosmosdbaccount:
resource_group: "{{ resource_group }}"
name: "{{ dbname }}"
- location: eastasia
+ location: eastus
kind: global_document_db
geo_rep_locations:
- - name: eastasia
+ - name: eastus
failover_priority: 0
- name: westus
failover_priority: 1
@@ -95,10 +95,10 @@
azure_rm_cosmosdbaccount:
resource_group: "{{ resource_group }}"
name: "{{ dbname }}"
- location: eastasia
+ location: eastus
kind: global_document_db
geo_rep_locations:
- - name: eastasia
+ - name: eastus
failover_priority: 0
- name: westus
failover_priority: 1
@@ -121,10 +121,10 @@
azure_rm_cosmosdbaccount:
resource_group: "{{ resource_group_secondary }}"
name: "{{ db2name }}"
- location: eastasia
+ location: eastus
kind: global_document_db
geo_rep_locations:
- - name: eastasia
+ - name: eastus
failover_priority: 0
- name: westus
failover_priority: 1
@@ -155,7 +155,7 @@
- output.accounts[0]['id'] != None
- output.accounts[0]['resource_group'] == resource_group
- output.accounts[0]['name'] == dbname
- - output.accounts[0]['location'] == 'eastasia'
+ - output.accounts[0]['location'] == 'eastus'
- output.accounts[0]['kind'] != None
- output.accounts[0]['consistency_policy'] != None
- output.accounts[0]['failover_policies'] != None
@@ -221,7 +221,7 @@
- output.accounts[0]['id'] != None
- output.accounts[0]['resource_group'] == resource_group
- output.accounts[0]['name'] == dbname
- - output.accounts[0]['location'] == 'eastasia'
+ - output.accounts[0]['location'] == 'eastus'
- output.accounts[0]['kind'] != None
- output.accounts[0]['consistency_policy'] != None
- output.accounts[0]['failover_policies'] != None
@@ -258,7 +258,7 @@
azure_rm_cosmosdbaccount:
resource_group: "{{ resource_group }}"
name: "{{ dbname }}-free4"
- location: eastasia
+ location: eastus
kind: mongo_db
mongo_version: "4.0"
enable_free_tier: "{{ free_tier_supported }}"
@@ -267,7 +267,7 @@
- "1.1.1.1"
- "2.2.2.2/28"
geo_rep_locations:
- - name: eastasia
+ - name: eastus
failover_priority: 0
- name: westus
failover_priority: 1
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml
deleted file mode 100644
index 8dc08f8b7..000000000
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/tasks/main.yml
+++ /dev/null
@@ -1,199 +0,0 @@
-- name: Create data lake store name
- ansible.builtin.set_fact:
- adl_name: "adl{{ resource_group_datalake | hash('md5') | truncate(21, True, '') }}"
- vnet_name: "vnet{{ resource_group_datalake | hash('md5') | truncate(20, True, '') }}"
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnet_name }}"
- resource_group: "{{ resource_group_datalake }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- register: vnet_output
-
-- name: Create subnet
- azure_rm_subnet:
- name: foobar
- virtual_network_name: "{{ vnet_name }}"
- resource_group: "{{ resource_group_datalake }}"
- address_prefix_cidr: "10.1.1.0/24"
- service_endpoints:
- - service: Microsoft.AzureActiveDirectory
- register: subnet_output
-
-- name: Create minimal data lake store
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- register: output
-
-- name: Assert status succeeded and results
- ansible.builtin.assert:
- that:
- - output.changed
- - output.state.id is defined
- - output.state.account_id is defined
- - output.state.creation_time is defined
- - output.state.current_tier == "Consumption"
- - output.state.encryption_state == "Enabled"
- - output.state.endpoint == "{{ adl_name }}.azuredatalakestore.net"
- - output.state.firewall_allow_azure_ips == "Disabled"
- - output.state.firewall_rules | length == 0
- - output.state.firewall_state == "Disabled"
- - output.state.last_modified_time is defined
- - output.state.new_tier == "Consumption"
- - output.state.provisioning_state == "Succeeded"
- - output.state.trusted_id_provider_state == "Disabled"
-
-- name: Create minimal data lake store (Idempotence)
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- register: output
-
-- name: Assert that status has not changed
- ansible.builtin.assert:
- that:
- - not output.changed
-
-- name: Update data lake store to add virtual_network_rules
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- virtual_network_rules:
- - name: vnet_rule_1
- subnet_id: "{{ subnet_output.state.id }}"
- register: output
-
-- name: Assert status succeeded and results include virtual_network_rules
- ansible.builtin.assert:
- that:
- - output.changed
- - output.state.virtual_network_rules | length == 1
- - output.state.virtual_network_rules[0].name == "vnet_rule_1"
- - output.state.virtual_network_rules[0].subnet_id == subnet_output.state.id
-
-- name: Update data lake store to change encryption state that must fail
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- encryption_state: Disabled
- register: output
- ignore_errors: true
-
-- name: Assert that encryption state cannot change
- ansible.builtin.assert:
- that:
- - not output.changed
- - output.msg == 'Encryption type cannot be updated.'
-
-- name: Update data lake store to add new_tier
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- new_tier: Commitment_1TB
- register: output
-
-- name: Assert status succeeded and results include virtual_network_rules
- ansible.builtin.assert:
- that:
- - output.changed
- - output.state.current_tier == "Consumption"
- - output.state.new_tier == "Commitment_1TB"
-
-- name: Delete minimal data lake store
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- state: absent
- register: output
-
-- name: Create new data lake store
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- tags:
- P1: V1
- P2: V4
- P3: V3
- new_tier: Commitment_1TB
- default_group: default_group_test
- encryption_state: Enabled
- firewall_state: Enabled
- firewall_allow_azure_ips: Enabled
- firewall_rules:
- - name: test_rule_1
- start_ip_address: 192.168.1.1
- end_ip_address: 192.168.1.254
- - name: test_rule_2
- start_ip_address: 10.0.0.1
- end_ip_address: 10.1.0.1
- virtual_network_rules:
- - name: vnet_rule_1
- subnet_id: "{{ subnet_output.state.id }}"
- register: output
-
-- name: Assert status succeeded and results include an Id value
- ansible.builtin.assert:
- that:
- - output.changed
- - output.state.id is defined
- - output.state.account_id is defined
- - output.state.creation_time is defined
- - output.state.current_tier == "Commitment_1TB"
- - output.state.default_group == "default_group_test"
- - output.state.encryption_state == "Enabled"
- - output.state.endpoint == "{{ adl_name }}.azuredatalakestore.net"
- - output.state.firewall_allow_azure_ips == "Enabled"
- - output.state.firewall_rules | length == 2
- - output.state.firewall_state == "Enabled"
- - output.state.last_modified_time is defined
- - output.state.new_tier == "Commitment_1TB"
- - output.state.provisioning_state == "Succeeded"
- - output.state.tags | length == 3
- - output.state.trusted_id_provider_state == "Disabled"
- - output.state.virtual_network_rules | length == 1
- - output.state.virtual_network_rules[0].name == "vnet_rule_1"
- - output.state.virtual_network_rules[0].subnet_id == subnet_output.state.id
-
-- name: Create new data lake store (Idempotence)
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- tags:
- P1: V1
- P2: V4
- P3: V3
- new_tier: Commitment_1TB
- default_group: default_group_test
- encryption_state: Enabled
- firewall_state: Enabled
- firewall_allow_azure_ips: Enabled
- firewall_rules:
- - name: test_rule_1
- start_ip_address: 192.168.1.1
- end_ip_address: 192.168.1.254
- - name: test_rule_2
- start_ip_address: 10.0.0.1
- end_ip_address: 10.1.0.1
- virtual_network_rules:
- - name: vnet_rule_1
- subnet_id: "{{ subnet_output.state.id }}"
- register: output
-
-- name: Assert that status has not changed
- ansible.builtin.assert:
- that:
- - not output.changed
-
-- name: Delete virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnet_name }}"
- resource_group: "{{ resource_group_datalake }}"
- state: absent
-
-- name: Delete Data Lake Store
- azure_rm_datalakestore:
- resource_group: "{{ resource_group_datalake }}"
- name: "{{ adl_name }}"
- state: absent
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml
index 62e60e95b..579d326d8 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_eventhub/tasks/main.yml
@@ -76,10 +76,10 @@
namespace_name: "{{ namespace_name }}"
name: "{{ name_rpfx }}"
resource_group: "{{ resource_group }}"
- message_retention_in_days: 4
+ message_retention_in_days: 1
state: present
tags:
- test: modified
+ test1: modified1
register: results
- name: Assert the event hub updated
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml
index 3d70ddd82..58bda9cce 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_expressroute/tasks/main.yml
@@ -14,9 +14,9 @@
authorizations:
- name: authorization_test
service_provider_properties:
- service_provider_name: Aryaka Networks
- peering_location: Seattle
- bandwidth_in_mbps: '200'
+ service_provider_name: Telstra Test
+ peering_location: Denver Test
+ bandwidth_in_mbps: 1000
sku:
tier: premium
family: metereddata
@@ -40,9 +40,9 @@
authorizations:
- name: authorization_test
service_provider_properties:
- service_provider_name: Aryaka Networks
- peering_location: Seattle
- bandwidth_in_mbps: '200'
+ service_provider_name: Telstra Test
+ peering_location: Denver Test
+ bandwidth_in_mbps: 1000
sku:
tier: premium
family: metereddata
@@ -65,9 +65,9 @@
authorizations:
- name: authorization_test
service_provider_properties:
- service_provider_name: Aryaka Networks
- peering_location: Seattle
- bandwidth_in_mbps: '200'
+ service_provider_name: Telstra Test
+ peering_location: Denver Test
+ bandwidth_in_mbps: 1000
sku:
tier: premium
family: metereddata
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml
index 2fc543091..3e9e32f47 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_image/tasks/main.yml
@@ -33,9 +33,12 @@
azure_rm_networkinterface:
resource_group: "{{ resource_group }}"
name: "{{ vm_name }}"
+ ip_configurations:
+ - name: default
+ public_ip_address_name: "{{ public_ip_name }}"
+ primary: true
virtual_network: "{{ vm_name }}"
subnet: "{{ vm_name }}"
- public_ip_name: "{{ public_ip_name }}"
security_group: "{{ security_group_name }}"
- name: Create virtual machine
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml
index 913e618d0..692590994 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_loadbalancer/tasks/main.yml
@@ -32,7 +32,9 @@
azure_rm_loadbalancer:
resource_group: '{{ resource_group }}'
name: "{{ lbname_a }}"
- public_ip: "{{ pipaname }}"
+ frontend_ip_configurations:
+ - name: frontendip0
+ public_ip_address: "{{ pipaname }}"
check_mode: true
register: output
@@ -44,7 +46,9 @@
azure_rm_loadbalancer:
resource_group: '{{ resource_group }}'
name: "{{ lbname_a }}"
- public_ip: "{{ pipaname }}"
+ frontend_ip_configurations:
+ - name: frontendip0
+ public_ip_address: "{{ pipaname }}"
register: output
- name: Assert load balancer created
@@ -55,7 +59,9 @@
azure_rm_loadbalancer:
resource_group: '{{ resource_group }}'
name: "{{ lbname_a }}"
- public_ip: "{{ pipaname }}"
+ frontend_ip_configurations:
+ - name: frontendip0
+ public_ip_address: "{{ pipaname }}"
register: output
- name: Assert no change
@@ -90,20 +96,34 @@
resource_group: '{{ resource_group }}'
name: "{{ lbname_b }}"
sku: Standard
- public_ip_address: "{{ pipbname }}"
- probe_protocol: Tcp
- probe_port: 80
- probe_interval: 10
- probe_fail_count: 3
- protocol: Tcp
- load_distribution: Default
- frontend_port: 80
- backend_port: 8080
- idle_timeout: 4
- natpool_frontend_port_start: 30
- natpool_frontend_port_end: 40
- natpool_backend_port: 80
- natpool_protocol: Tcp
+ frontend_ip_configurations:
+ - name: frontendip0
+ public_ip_address: "{{ pipbname }}"
+ backend_address_pools:
+ - name: backendaddrp0
+ probes:
+ - name: prob0
+ port: 80
+ protocol: Tcp
+ interval: 10
+ fail_count: 3
+ inbound_nat_pools:
+ - name: inboundnatp0
+ frontend_ip_configuration_name: frontendip0
+ protocol: Tcp
+ frontend_port_range_start: 30
+ frontend_port_range_end: 40
+ backend_port: 80
+ load_balancing_rules:
+ - name: lbr
+ frontend_ip_configuration: frontendip0
+ backend_address_pool: backendaddrp0
+ probe: prob0
+ frontend_port: 80
+ backend_port: 8080
+ idle_timeout: 4
+ load_distribution: Default
+ protocol: Tcp
register: output
- name: Assert complex load balancer created
@@ -117,20 +137,34 @@
resource_group: '{{ resource_group }}'
name: "{{ lbname_b }}"
sku: Standard
- public_ip_address: "{{ pipbname }}"
- probe_protocol: Tcp
- probe_port: 80
- probe_interval: 10
- probe_fail_count: 3
- protocol: Tcp
- load_distribution: Default
- frontend_port: 80
- backend_port: 8080
- idle_timeout: 4
- natpool_frontend_port_start: 30
- natpool_frontend_port_end: 40
- natpool_backend_port: 80
- natpool_protocol: Tcp
+ frontend_ip_configurations:
+ - name: frontendip0
+ public_ip_address: "{{ pipbname }}"
+ backend_address_pools:
+ - name: backendaddrp0
+ probes:
+ - name: prob0
+ port: 80
+ protocol: Tcp
+ interval: 10
+ fail_count: 3
+ inbound_nat_pools:
+ - name: inboundnatp0
+ frontend_ip_configuration_name: frontendip0
+ protocol: Tcp
+ frontend_port_range_start: 30
+ frontend_port_range_end: 40
+ backend_port: 80
+ load_balancing_rules:
+ - name: lbr
+ frontend_ip_configuration: frontendip0
+ backend_address_pool: backendaddrp0
+ probe: prob0
+ frontend_port: 80
+ backend_port: 8080
+ idle_timeout: 4
+ load_distribution: Default
+ protocol: Tcp
register: output
- name: Assert that output has not changed
@@ -143,20 +177,34 @@
resource_group: '{{ resource_group }}'
name: "{{ lbname_b }}"
sku: Standard
- public_ip_address: "{{ pipbname }}"
- probe_protocol: Tcp
- probe_port: 80
- probe_interval: 10
- probe_fail_count: 3
- protocol: Tcp
- load_distribution: Default
- frontend_port: 81
- backend_port: 8080
- idle_timeout: 4
- natpool_frontend_port_start: 30
- natpool_frontend_port_end: 40
- natpool_backend_port: 80
- natpool_protocol: Tcp
+ frontend_ip_configurations:
+ - name: frontendip0
+ public_ip_address: "{{ pipbname }}"
+ backend_address_pools:
+ - name: backendaddrp0
+ probes:
+ - name: prob0
+ port: 80
+ protocol: Tcp
+ interval: 10
+ fail_count: 3
+ inbound_nat_pools:
+ - name: inboundnatp0
+ frontend_ip_configuration_name: frontendip0
+ protocol: Tcp
+ frontend_port_range_start: 30
+ frontend_port_range_end: 40
+ backend_port: 80
+ load_balancing_rules:
+ - name: lbr
+ frontend_ip_configuration: frontendip0
+ backend_address_pool: backendaddrp0
+ probe: prob0
+ frontend_port: 81
+ backend_port: 8080
+ idle_timeout: 4
+ load_distribution: Default
+ protocol: Tcp
register: output
- name: Assert that output has changed
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/aliases
new file mode 100644
index 000000000..aa77c071a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+shippable/azure/group2
+destructive
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/meta/main.yml
index 95e1952f9..95e1952f9 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/meta/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/meta/main.yml
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/tasks/main.yml
new file mode 100644
index 000000000..d7447111c
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_localnetworkgateway/tasks/main.yml
@@ -0,0 +1,114 @@
+- name: Set Storage Account Names
+ ansible.builtin.set_fact:
+ rpfx: "{{ resource_group | hash('md5') | truncate(21, True, '') }}"
+
+- name: Create a new local network gateway (Check mode)
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "{{ rpfx }}"
+ local_network_address_space:
+ address_prefixes:
+ - 10.0.0.0/24
+ - 20.0.0.0/24
+ fqdn: testfqdn.com
+ bgp_settings:
+ asn: 8
+ bgp_peering_address: 10.3.0.1
+ peer_weight: 3
+ tags:
+ key1: value1
+ check_mode: true
+
+- name: Create a new local network gateway
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "{{ rpfx }}"
+ local_network_address_space:
+ address_prefixes:
+ - 10.0.0.0/24
+ - 20.0.0.0/24
+ fqdn: testfqdn.com
+ bgp_settings:
+ asn: 8
+ bgp_peering_address: 10.3.0.1
+ peer_weight: 3
+ tags:
+ key1: value1
+ register: output
+
+- name: Assert the local network gateway is well created
+ ansible.builtin.assert:
+ that:
+ - output.changed
+ - output.state.provisioning_state == 'Succeeded'
+
+- name: Create a new local network gateway(Idempotent test)
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "{{ rpfx }}"
+ local_network_address_space:
+ address_prefixes:
+ - 10.0.0.0/24
+ - 20.0.0.0/24
+ fqdn: testfqdn.com
+ bgp_settings:
+ asn: 8
+ bgp_peering_address: 10.3.0.1
+ peer_weight: 3
+ tags:
+ key1: value1
+ register: output
+
+- name: Assert the local network gateway no change
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+
+- name: Create a new local network gateway(Update test)
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "{{ rpfx }}"
+ local_network_address_space:
+ address_prefixes:
+ - 10.0.0.0/24
+ - 30.0.0.0/24
+ fqdn: testfqdn1.com
+ bgp_settings:
+ asn: 10
+ bgp_peering_address: 20.3.0.1
+ peer_weight: 5
+ tags:
+ key1: value1
+ register: output
+
+- name: Assert the local network gateway updated
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Get a new local network gateway
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ rpfx }}"
+ register: output
+
+- name: Assert the local network gateway facts
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+ - output.state[0].bgp_settings.asn == 10
+ - output.state[0].bgp_settings.peer_weight == 5
+ - "output.state[0].tags | length == 1"
+ - "output.state[0].local_network_address_space.address_prefixes | length == 3"
+
+- name: Delete the local network gateway
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "{{ rpfx }}"
+ state: absent
+ register: output
+
+- name: Assert the local network gateway is deleted
+ ansible.builtin.assert:
+ that:
+ - output.changed
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases
index b586dc7c3..0cfc7d52b 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mariadbserver/aliases
@@ -6,3 +6,4 @@ azure_rm_mariadbdatabase
azure_rm_mariadbdatabase_facts
azure_rm_mariadbfirewallrule
azure_rm_mariadbfirewallrule_facts
+disabled
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases
index 21e7a127b..98ddd39ff 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_mysqlserver/aliases
@@ -8,3 +8,4 @@ azure_rm_mysqlfirewallrule
azure_rm_mysqlfirewallrule_facts
azure_rm_mysqlconfiguration
azure_rm_mysqlconfiguration_facts
+disabled
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml
index e47906d72..4955230f2 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_networkinterface/tasks/main.yml
@@ -87,8 +87,7 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- public_ip: false
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: false
register: output
check_mode: true
@@ -105,8 +104,7 @@
virtual_network:
name: "tn{{ rpfx }}"
resource_group: "{{ resource_group_secondary }}"
- subnet: "tn{{ rpfx }}"
- public_ip: false
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: false
register: output
@@ -146,7 +144,7 @@
private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}"
private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}"
primary: "{{ facts.networkinterfaces[0].ip_configurations[0].primary }}"
- subnet: "{{ facts.networkinterfaces[0].subnet }}"
+ subnet_name: "{{ facts.networkinterfaces[0].subnet }}"
register: output
- name: Assert the NIC created
@@ -159,9 +157,8 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: false
- public_ip: false
register: output
- name: Assert NIC created
@@ -174,8 +171,7 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- public_ip: false
+ subnet_name: "tn{{ rpfx }}"
security_group: "tn{{ rpfx }}sg"
register: output
check_mode: true
@@ -190,8 +186,10 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- public_ip_address_name: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
+ ip_configurations:
+ - name: default
+ public_ip_address_name: "tn{{ rpfx }}"
create_with_security_group: false
register: output
check_mode: true
@@ -206,10 +204,9 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
enable_accelerated_networking: true
create_with_security_group: false
- public_ip: false
register: output
check_mode: true
@@ -223,10 +220,9 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: false
enable_ip_forwarding: true
- public_ip: false
register: output
check_mode: true
@@ -240,9 +236,8 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: false
- public_ip: false
dns_servers:
- 8.9.10.11
- 7.8.9.10
@@ -259,7 +254,7 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
enable_accelerated_networking: true
enable_ip_forwarding: true
security_group: "tn{{ rpfx }}sg"
@@ -300,7 +295,7 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
enable_accelerated_networking: true
security_group: "tn{{ rpfx }}sg"
enable_ip_forwarding: true
@@ -355,7 +350,7 @@
resource_group: "{{ resource_group }}"
name: "tn{{ rpfx }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
security_group: "tn{{ rpfx }}sg"
enable_accelerated_networking: true
enable_ip_forwarding: true
@@ -462,9 +457,8 @@
resource_group: "{{ resource_group }}"
name: "{{ nic_name1 }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: true
- public_ip: false
ip_configurations:
- name: ipconfig1
application_security_groups:
@@ -483,9 +477,8 @@
resource_group: "{{ resource_group }}"
name: "{{ nic_name1 }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: true
- public_ip: false
ip_configurations:
- name: ipconfig1
application_security_groups:
@@ -504,9 +497,8 @@
resource_group: "{{ resource_group }}"
name: "{{ nic_name1 }}"
virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
+ subnet_name: "tn{{ rpfx }}"
create_with_security_group: true
- public_ip: false
ip_configurations:
- name: ipconfig1
application_security_groups:
@@ -598,7 +590,6 @@
virtual_network: "tn{{ rpfx }}"
subnet_name: "nic-appgw-subnet{{ rpfx }}"
create_with_security_group: false
- public_ip: false
ip_configurations:
- name: "default"
primary: true
@@ -621,7 +612,6 @@
virtual_network: "tn{{ rpfx }}"
subnet_name: "nic-appgw-subnet{{ rpfx }}"
create_with_security_group: false
- public_ip: false
ip_configurations:
- name: "default"
primary: true
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlflexibleserver/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlflexibleserver/tasks/main.yml
index 2add54e1e..9880cdc3c 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlflexibleserver/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_postgresqlflexibleserver/tasks/main.yml
@@ -1,15 +1,43 @@
+- name: Gather Resource Group info
+ azure.azcollection.azure_rm_resourcegroup_info:
+ name: "{{ resource_group }}"
+ register: __rg_info
+
- name: Prepare random number
ansible.builtin.set_fact:
rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}"
new_resource_group: "{{ resource_group }}-02"
run_once: true
+- name: Set Azure Region based on resource group location
+ ansible.builtin.set_fact:
+ location: "{{ __rg_info.resourcegroups.0.location }}"
+
- name: Create a new resource group
azure_rm_resourcegroup:
name: "{{ new_resource_group }}"
- location: southeastasia
+ location: "{{ location }}"
+
+- name: Create User Managed Identities
+ azure_rm_resource:
+ resource_group: "{{ new_resource_group }}"
+ provider: ManagedIdentity
+ resource_type: userAssignedIdentities
+ resource_name: "{{ item }}"
+ api_version: "2023-01-31"
+ body:
+ location: "{{ location }}"
+ state: present
+ loop:
+ - "ansible-test-pgsql-identity"
+ - "ansible-test-pgsql-identity-2"
+
+- name: Set identities IDs to test. Identities ansible-test-pgsql-identity and ansible-test-pgsql-identity-2 are created by the previous task
+ ansible.builtin.set_fact:
+ user_identity_1: "/subscriptions/{{ azure_subscription_id }}/resourcegroups/{{ new_resource_group }}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/ansible-test-pgsql-identity"
+ user_identity_2: "/subscriptions/{{ azure_subscription_id }}/resourcegroups/{{ new_resource_group }}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/ansible-test-pgsql-identity-2"
-- name: Create post gresql flexible server (check mode)
+- name: Create postgresql flexible server (check mode)
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
@@ -34,9 +62,14 @@
day_of_week: 3
availability_zone: 2
create_mode: Create
+ identity:
+ type: UserAssigned
+ user_assigned_identities:
+ id:
+ - "{{ user_identity_1 }}"
check_mode: true
-- name: Create post gresql flexible server
+- name: Create postgresql flexible server
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
@@ -61,14 +94,25 @@
day_of_week: 3
availability_zone: 2
create_mode: Create
+ identity:
+ type: UserAssigned
+ user_assigned_identities:
+ id:
+ - "{{ user_identity_1 }}"
register: output
-- name: Assert the post grep sql server create success
+- name: Assert the postgresql flexible server create success
ansible.builtin.assert:
that:
- output.changed
-- name: Create post gresql flexible server (Idempotent Test)
+- name: Assert User identity assigned
+ ansible.builtin.assert:
+ that:
+ - output.state.identity.type == 'UserAssigned'
+ - user_identity_1 in output.state.identity.user_assigned_identities
+
+- name: Create postgresql flexible server (Idempotent Test)
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
@@ -93,14 +137,19 @@
day_of_week: 3
availability_zone: 2
create_mode: Create
+ identity:
+ type: UserAssigned
+ user_assigned_identities:
+ id:
+ - "{{ user_identity_1 }}"
register: output
-- name: Assert the post grep sql server create success
+- name: Assert the postgresql server create success
ansible.builtin.assert:
that:
- not output.changed
-- name: Update post gresql flexible server with multiple parameters
+- name: Update postgresql flexible server with multiple parameters
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
@@ -128,9 +177,15 @@
tags:
key1: value1
key2: value2
+ identity:
+ type: "UserAssigned"
+ user_assigned_identities:
+ id:
+ - "{{ user_identity_2 }}"
+ append: true
register: output
-- name: Assert the post grep sql server update success
+- name: Assert the postgresql server update success
ansible.builtin.assert:
that:
- output.changed
@@ -141,7 +196,7 @@
name: postflexible{{ rpfx }}
register: output
-- name: Assert the post gresql server is well created
+- name: Assert the postgresql server is well created
ansible.builtin.assert:
that:
- output.servers[0].tags | length == 2
@@ -150,8 +205,23 @@
- output.servers[0].maintenance_window.day_of_week == 6
- output.servers[0].maintenance_window.start_hour == 10
- output.servers[0].maintenance_window.start_minute == 6
+ - user_identity_1 in output.servers[0].identity.user_assigned_identities
+ - user_identity_2 in output.servers[0].identity.user_assigned_identities
+
+- name: Postgresql server Identity None
+ azure_rm_postgresqlflexibleserver:
+ name: postflexible{{ rpfx }}
+ resource_group: "{{ new_resource_group }}"
+ identity:
+ type: "None"
+ register: output
+
+- name: Assert no managed identities
+ ansible.builtin.assert:
+ that:
+ - output.state.identity.type == 'None'
-- name: Create a post gresql flexible database(check mode)
+- name: Create a postgresql flexible database(check mode)
azure_rm_postgresqlflexibledatabase:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -160,7 +230,7 @@
charset: UTF8
check_mode: true
-- name: Create a post gresql flexible database
+- name: Create a postgresql flexible database
azure_rm_postgresqlflexibledatabase:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -169,12 +239,12 @@
charset: UTF8
register: output
-- name: Assert the post gresql flexible database created success
+- name: Assert the postgresql flexible database created success
ansible.builtin.assert:
that:
- output.changed
-- name: Create a post gresql flexible database(Idempotent test)
+- name: Create a postgresql flexible database(Idempotent test)
azure_rm_postgresqlflexibledatabase:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -183,25 +253,25 @@
charset: UTF8
register: output
-- name: Assert the post gresql flexible database no changed
+- name: Assert the postgresql flexible database no changed
ansible.builtin.assert:
that:
- not output.changed
-- name: Get the post gresql flexibe database facts
+- name: Get the postgresql flexible database facts
azure_rm_postgresqlflexibledatabase_info:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
name: database{{ rpfx }}
register: output
-- name: Assert the post gresql flexible database facts
+- name: Assert the postgresql flexible database facts
ansible.builtin.assert:
that:
- output.databases[0].collation == 'en_US.utf8'
- output.databases[0].charset == 'UTF8'
-- name: Delete the post gresql flexibe database
+- name: Delete the postgresql flexible database
azure_rm_postgresqlflexibledatabase:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -209,12 +279,12 @@
state: absent
register: output
-- name: Assert the post gresql flexible database deleted
+- name: Assert the postgresql flexible database deleted
ansible.builtin.assert:
that:
- output.changed
-- name: Create a post gresql flexible firwall rule (Check mode)
+- name: Create a postgresql flexible firewall rule (Check mode)
azure_rm_postgresqlflexiblefirewallrule:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -223,7 +293,7 @@
end_ip_address: 10.0.0.20
check_mode: true
-- name: Create the post gresql flexible firwall rule
+- name: Create the postgresql flexible firewall rule
azure_rm_postgresqlflexiblefirewallrule:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -232,12 +302,12 @@
end_ip_address: 10.0.0.20
register: output
-- name: Assert the post grepsql flexible firewall rule created well
+- name: Assert the postgresql flexible firewall rule created well
ansible.builtin.assert:
that:
- output.changed
-- name: Create the post gresql flexible firwall rule (Idempotent test)
+- name: Create the postgresql flexible firewall rule (Idempotent test)
azure_rm_postgresqlflexiblefirewallrule:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -246,12 +316,12 @@
end_ip_address: 10.0.0.20
register: output
-- name: Assert the post grepsql flexible firewall rule support idempotent test
+- name: Assert the postgresql flexible firewall rule support idempotent test
ansible.builtin.assert:
that:
- not output.changed
-- name: Update the post gresql flexible firwall rule
+- name: Update the postgresql flexible firewall rule
azure_rm_postgresqlflexiblefirewallrule:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -260,25 +330,25 @@
end_ip_address: 10.0.0.18
register: output
-- name: Assert the post grepsql flexible server update well
+- name: Assert the postgresql flexible server update well
ansible.builtin.assert:
that:
- output.changed
-- name: Get the post gresql flexible firwall rule facts
+- name: Get the postgresql flexible firewall rule facts
azure_rm_postgresqlflexiblefirewallrule_info:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
name: firewall{{ rpfx }}
register: output
-- name: Assert the post gresql flexible firewall rule facts
+- name: Assert the postgresql flexible firewall rule facts
ansible.builtin.assert:
that:
- output.firewall_rules[0].start_ip_address == '10.0.0.16'
- output.firewall_rules[0].end_ip_address == '10.0.0.18'
-- name: Delete the post gresql flexible firwall rule
+- name: Delete the postgresql flexible firewall rule
azure_rm_postgresqlflexiblefirewallrule:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
@@ -286,30 +356,30 @@
state: absent
register: output
-- name: Assert the post grepsql flexible server delete well
+- name: Assert the postgresql flexible server delete well
ansible.builtin.assert:
that:
- output.changed
-- name: List the post gresql flexible config facts
+- name: List the postgresql flexible config facts
azure_rm_postgresqlflexibleconfiguration_info:
resource_group: "{{ new_resource_group }}"
server_name: postflexible{{ rpfx }}
register: output
-- name: Assert the post gresql flexible server configuration
+- name: Assert the postgresql flexible server configuration
ansible.builtin.assert:
that:
- output.settings | length > 0
-- name: Stop the post gresql flexible server
+- name: Stop the postgresql flexible server
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
is_stop: true
register: output
-- name: Assert the post grep sql server stop success
+- name: Assert the postgresql server stop success
ansible.builtin.assert:
that:
- output.changed
@@ -319,30 +389,42 @@
minutes: 10
changed_when: true
-- name: Restart post gresql flexible server
+- name: Restart postgresql flexible server
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
is_restart: true
register: output
-- name: Assert the post grep sql server restart success
+- name: Assert the postgresql server restart success
ansible.builtin.assert:
that:
- output.changed
-- name: Delete post gresql flexible server
+- name: Delete postgresql flexible server
azure_rm_postgresqlflexibleserver:
resource_group: "{{ new_resource_group }}"
name: postflexible{{ rpfx }}
state: absent
register: output
-- name: Assert the post gresql server is well deleted
+- name: Assert the postgresql server is well deleted
ansible.builtin.assert:
that:
- output.changed
+- name: Destroy User Managed Identities
+ azure_rm_resource:
+ resource_group: "{{ new_resource_group }}"
+ provider: ManagedIdentity
+ resource_type: userAssignedIdentities
+ resource_name: "{{ item }}"
+ api_version: "2023-01-31"
+ state: absent
+ loop:
+ - "ansible-test-psql-identity"
+ - "ansible-test-psql-identity-2"
+
- name: Delete the new resource group
azure_rm_resourcegroup:
name: "{{ new_resource_group }}"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml
index a3743b074..d42d65ecd 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_privatelinkservice/tasks/main.yml
@@ -1,7 +1,6 @@
- name: Set Private Link Service Names
ansible.builtin.set_fact:
rpfx: "{{ resource_group | hash('md5') | truncate(20, True, '') }}"
- azure_subscription_id: f64d4ee8-be94-457d-ba26-3fa6b6506cef
- name: Delete the private link service
azure_rm_privatelinkservice:
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/aliases
index 6feba04aa..e133dafcd 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_datalakestore/aliases
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/aliases
@@ -1,4 +1,4 @@
cloud/azure
-shippable/azure/group10
destructive
+shippable/azure/group9
disabled
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/meta/main.yml
new file mode 100644
index 000000000..95e1952f9
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_azure
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/tasks/main.yml
new file mode 100644
index 000000000..ddbc1b881
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_sqlmanageddatabase/tasks/main.yml
@@ -0,0 +1,264 @@
+- name: Prepare random number
+ ansible.builtin.set_fact:
+ random_postfix: "sqlmi{{ 1000 | random }}{{ resource_group | hash('md5') | truncate(8, True, '') }}"
+
+- name: Create virtual network
+ azure_rm_virtualnetwork:
+ name: "{{ random_postfix }}"
+ address_prefixes_cidr:
+ - 10.1.0.0/16
+ - 172.100.0.0/16
+ - fdda:e69b:1587:495e::/64
+ dns_servers:
+ - 127.0.0.1
+ - 127.0.0.3
+ resource_group: "{{ resource_group }}"
+
+- name: Create a route table
+ azure_rm_routetable:
+ name: "{{ random_postfix }}"
+ resource_group: "{{ resource_group }}"
+ tags:
+ purpose: testing
+
+- name: Create security group
+ azure_rm_securitygroup:
+ resource_group: "{{ resource_group }}"
+ name: "{{ random_postfix }}"
+ tags:
+ testing: testing
+ delete: on-exit
+ foo: bar
+ purge_rules: true
+ rules:
+ - name: DenySSH
+ protocol: Tcp
+ destination_port_range: 22
+ access: Deny
+ priority: 100
+ direction: Inbound
+ - name: AllowSSH
+ protocol: Tcp
+ source_address_prefix: 174.109.158.0/24
+ destination_port_range: 22
+ access: Allow
+ priority: 101
+ direction: Inbound
+
+- name: Add the subnet back
+ azure_rm_subnet:
+ name: foobar
+ virtual_network_name: "{{ random_postfix }}"
+ resource_group: "{{ resource_group }}"
+ address_prefix_cidr: "10.1.0.0/24"
+ security_group:
+ resource_group: "{{ resource_group }}"
+ name: "{{ random_postfix }}"
+ route_table:
+ name: "{{ random_postfix }}"
+ resource_group: "{{ resource_group }}"
+ delegations:
+ - name: 'mysqlinstance'
+ serviceName: "Microsoft.Sql/managedInstances"
+ register: subnet_output
+
+- name: Create sql managed instance
+ azure_rm_sqlmanagedinstance:
+ resource_group: "{{ resource_group }}"
+ name: "{{ random_postfix }}"
+ subnet_id: "{{ subnet_output.state.id }}"
+ identity:
+ type: SystemAssigned
+ sku:
+ name: GP_Gen5
+ tier: GeneralPurpose
+ family: Gen5
+ capacity: 8
+ administrator_login: azureuser
+ administrator_login_password: Fredtest@password0329test
+ storage_size_in_gb: 256
+ v_cores: 8
+
+- name: Create a new sql managed instance database(checkmode test)
+ azure_rm_sqlmidatabase:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ collation: SQL_Latin1_General_CP1_CI_AS
+ location: eastus
+ tags:
+ key1: value1
+ check_mode: true
+
+- name: Create a new sql managed instance database
+ azure_rm_sqlmidatabase:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ collation: SQL_Latin1_General_CP1_CI_AS
+ location: eastus
+ tags:
+ key1: value1
+ register: output
+
+- name: Assert the database is well created
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Create a new sql managed instance database (Idempotent test)
+ azure_rm_sqlmidatabase:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ collation: SQL_Latin1_General_CP1_CI_AS
+ location: eastus
+ tags:
+ key1: value1
+ register: output
+
+- name: Assert the database has not changed
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+
+- name: Update the sql managed instance database tags
+ azure_rm_sqlmidatabase:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ collation: SQL_Latin1_General_CP1_CI_AS
+ location: eastus
+ tags:
+ key2: value2
+ register: output
+
+- name: Assert the database updated
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Get the sql managed instance database facts
+ azure_rm_sqlmidatabase_info:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ register: output
+
+- name: Assert the database facts
+ ansible.builtin.assert:
+ that:
+ - output.database[0].tags |length == 2
+ - output.database[0].collation == "SQL_Latin1_General_CP1_CI_AS"
+
+- name: Get sql managed database long term retention policy by name
+ azure_rm_sqlmidblongtermretentionpolicy_info:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ policy_name: default
+ register: output
+
+- name: Assert the sql managed database long term retention policy facts
+ ansible.builtin.assert:
+ that:
+ - output.long_term_retention_policy[0].name == 'default'
+ - output.long_term_retention_policy[0].monthly_retention == "PT0S"
+ - output.long_term_retention_policy[0].yearly_retention == "PT0S"
+ - output.long_term_retention_policy[0].weekly_retention == "PT0S"
+ - output.long_term_retention_policy[0].week_of_year == 0
+
+- name: Update the database long term retention policy
+ azure_rm_sqlmidblongtermretentionpolicy:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ policy_name: default
+ monthly_retention: P3M
+ week_of_year: 13
+ weekly_retention: P13W
+ yearly_retention: P3Y
+ register: output
+
+- name: Assert the sql managed database long term retention policy updated
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Get sql managed database long term retention policy by name
+ azure_rm_sqlmidblongtermretentionpolicy_info:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ policy_name: default
+ register: output
+
+- name: Assert the sql managed database long term retention policy facts
+ ansible.builtin.assert:
+ that:
+ - output.long_term_retention_policy[0].name == 'default'
+ - output.long_term_retention_policy[0].monthly_retention == "P3M"
+ - output.long_term_retention_policy[0].yearly_retention == "P3Y"
+ - output.long_term_retention_policy[0].weekly_retention == "P13W"
+ - output.long_term_retention_policy[0].week_of_year == 13
+
+- name: Get the sql managed instance database short term retention policy facts
+ azure_rm_sqlmidbshorttermretentionpolicy_info:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ policy_name: default
+ register: output
+
+- name: Assert the sql managed database short term retention policy facts
+ ansible.builtin.assert:
+ that:
+ - output.short_term_retention_policy[0].name == 'default'
+ - output.short_term_retention_policy[0].retention_days == 7
+
+- name: Update the sql managed short term retention
+ azure_rm_sqlmidbshorttermretentionpolicy:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ policy_name: default
+ retention_days: 10
+ register: output
+
+- name: Assert the sql managed database short term retention policy updated
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Get the sql managed instance database short term retention policy facts
+ azure_rm_sqlmidbshorttermretentionpolicy_info:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ policy_name: default
+ register: output
+
+- name: Assert the sql managed database short term retention policy facts
+ ansible.builtin.assert:
+ that:
+ - output.short_term_retention_policy[0].name == 'default'
+ - output.short_term_retention_policy[0].retention_days == 10
+
+- name: Delete the sql managed database
+ azure_rm_sqlmidatabase:
+ resource_group: "{{ resource_group }}"
+ managed_instance_name: "{{ random_postfix }}"
+ database_name: "database{{ random_postfix }}"
+ state: absent
+ register: output
+
+- name: Assert the sql managed database deleted
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Delete sql managed instance
+ azure_rm_sqlmanagedinstance:
+ resource_group: "{{ resource_group }}"
+ name: "{{ random_postfix }}"
+ state: absent
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/aliases
new file mode 100644
index 000000000..aa77c071a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+shippable/azure/group2
+destructive
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/meta/main.yml
new file mode 100644
index 000000000..95e1952f9
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_azure
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/tasks/main.yml
new file mode 100644
index 000000000..96245a06a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageaccountmanagementpolicy/tasks/main.yml
@@ -0,0 +1,242 @@
+- name: Set random value
+ ansible.builtin.set_fact:
+ rpfx: "{{ resource_group | hash('md5') | truncate(22, True, '') }}"
+
+- name: Create storage account with static website disabled
+ azure_rm_storageaccount:
+ resource_group: "{{ resource_group }}"
+ name: "st{{ rpfx }}"
+ account_type: Standard_LRS
+ kind: StorageV2
+ static_website:
+ enabled: false
+
+- name: Create storage account management policy rule (Check mode test)
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ rules:
+ - name: olcmtest
+ type: Lifecycle
+ enabled: false
+ definition:
+ actions:
+ base_blob:
+ enable_auto_tier_to_hot_from_cool: true
+ delete:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ tier_to_cool:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ tier_to_archive:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ snapshot:
+ tier_to_cool:
+ days_after_creation_greater_than: 22
+ tier_to_archive:
+ days_after_creation_greater_than: 22
+ delete:
+ days_after_creation_greater_than: 22
+ version:
+ tier_to_archive:
+ days_after_creation_greater_than: 22
+ tier_to_cool:
+ days_after_creation_greater_than: 22
+ delete:
+ days_after_creation_greater_than: 22
+ filters:
+ prefix_match:
+ - olcmtestcontainer
+ blob_types:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ - name: tags6
+ op: '=='
+ value: value6
+ check_mode: true
+
+- name: Create storage account management policy rule
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ rules:
+ - name: olcmtest
+ type: Lifecycle
+ enabled: false
+ definition:
+ actions:
+ base_blob:
+ enable_auto_tier_to_hot_from_cool: true
+ delete:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ tier_to_cool:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ tier_to_archive:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ snapshot:
+ tier_to_cool:
+ days_after_creation_greater_than: 22
+ tier_to_archive:
+ days_after_creation_greater_than: 22
+ delete:
+ days_after_creation_greater_than: 22
+ version:
+ tier_to_archive:
+ days_after_creation_greater_than: 22
+ tier_to_cool:
+ days_after_creation_greater_than: 22
+ delete:
+ days_after_creation_greater_than: 22
+ filters:
+ prefix_match:
+ - olcmtestcontainer
+ blob_types:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ - name: tags6
+ op: '=='
+ value: value6
+ register: output
+
+- name: Assert the Managed policy created
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Create storage account management policy rule(Idempotent test)
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ rules:
+ - name: olcmtest
+ type: Lifecycle
+ enabled: false
+ definition:
+ actions:
+ base_blob:
+ enable_auto_tier_to_hot_from_cool: true
+ delete:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ tier_to_cool:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ tier_to_archive:
+ days_after_modification_greater_than: 22
+ days_after_last_access_time_greater_than: 22
+ snapshot:
+ tier_to_cool:
+ days_after_creation_greater_than: 22
+ tier_to_archive:
+ days_after_creation_greater_than: 22
+ delete:
+ days_after_creation_greater_than: 22
+ version:
+ tier_to_archive:
+ days_after_creation_greater_than: 22
+ tier_to_cool:
+ days_after_creation_greater_than: 22
+ delete:
+ days_after_creation_greater_than: 22
+ filters:
+ prefix_match:
+ - olcmtestcontainer
+ blob_types:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ - name: tags6
+ op: '=='
+ value: value6
+ register: output
+
+- name: Assert the Managed policy no change
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+
+- name: Create storage account management policy rule(Updating test)
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ rules:
+ - name: olcmtest
+ type: Lifecycle
+ enabled: false
+ definition:
+ actions:
+ base_blob:
+ enable_auto_tier_to_hot_from_cool: true
+ delete:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ tier_to_cool:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ tier_to_archive:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ snapshot:
+ tier_to_cool:
+ days_after_creation_greater_than: 33
+ tier_to_archive:
+ days_after_creation_greater_than: 33
+ delete:
+ days_after_creation_greater_than: 33
+ version:
+ tier_to_archive:
+ days_after_creation_greater_than: 33
+ tier_to_cool:
+ days_after_creation_greater_than: 33
+ delete:
+ days_after_creation_greater_than: 33
+ filters:
+ prefix_match:
+ - olcmtestcontainer
+ blob_types:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ - name: tags6
+ op: '=='
+ value: value6
+ register: output
+
+- name: Assert the Managed policy updated
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Get storage account management policy
+ azure_rm_storageaccountmanagementpolicy_info:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ register: output
+
+- name: Assert the management policy rule facts
+ ansible.builtin.assert:
+ that:
+ - output.state.policy.rules[0].enabled is false
+ - output.state.policy.rules[0].name == 'olcmtest'
+ - output.state.policy.rules[0].type == 'Lifecycle'
+ - output.state.policy.rules[0].definition.actions.base_blob.enable_auto_tier_to_hot_from_cool is true
+ - output.state.policy.rules | length == 1
+
+- name: Delete the storage account management policy's rule
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ state: absent
+ register: output
+
+- name: Assert the Managed policy deleted
+ ansible.builtin.assert:
+ that:
+ - output.changed
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml
index 98cd3dc38..71abbbbf1 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_storageblob/tasks/main.yml
@@ -8,6 +8,7 @@
resource_group: "{{ resource_group }}"
name: "{{ storage_account }}"
account_type: Standard_LRS
+ register: az_storageaccount
- name: Create container
azure_rm_storageblob:
@@ -16,6 +17,24 @@
account_name: "{{ storage_account }}"
container_name: my-blobs
+- name: Lookup service principal object id
+ ansible.builtin.set_fact:
+ object_id: "{{ lookup('azure.azcollection.azure_service_principal_attribute',
+ azure_client_id=azure_client_id,
+ azure_secret=azure_secret,
+ azure_tenant=azure_tenant) }}"
+ register: spn_facts
+
+- name: Create role assignment by scope (Storage Blob Data Contributor)
+ azure_rm_roleassignment:
+ scope: "{{ az_storageaccount.state.id }}/blobServices/default/containers/my-blobs"
+ assignee_object_id: "{{ spn_facts.ansible_facts.object_id }}"
+ role_definition_id: "/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe"
+
+- name: Pause for 30 seconds
+ ansible.builtin.command: sleep 30
+ changed_when: true
+
- name: Force upload blob
azure_rm_storageblob:
auth_mode: login
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml
index 63ba77727..62525210f 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/inventory.yml
@@ -13,6 +13,10 @@ all:
network: 10.42.2.0/24
subnet: 10.42.2.0/28
+ azure_test_skus:
+ network: 10.42.3.0/24
+ subnet: 10.42.3.0/28
+
azure_test_minimal:
network: 10.42.3.0/24
subnet: 10.42.3.0/28
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml
index f24be88e2..0ef24fd82 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml
@@ -31,9 +31,12 @@
azure_rm_networkinterface:
resource_group: "{{ resource_group }}"
name: "{{ interface_name }}"
+ ip_configurations:
+ - name: default
+ primary: true
+ public_ip_address_name: "{{ public_ip_name }}"
virtual_network: "{{ network_name }}"
subnet: "{{ subnet_name }}"
- public_ip_name: "{{ public_ip_name }}"
security_group: "{{ security_group }}"
- name: Create virtual machine with a single NIC and no boot diagnostics
@@ -233,7 +236,7 @@
azure_rm_virtualmachine:
resource_group: "{{ resource_group }}"
name: "{{ vm_name }}"
- vm_size: Standard_A1
+ vm_size: Standard_A1_v2
storage_account: "{{ storage_account }}"
storage_container: "{{ vm_name }}"
storage_blob: "{{ vm_name }}.vhd"
@@ -256,7 +259,7 @@
ansible.builtin.assert:
that:
- resize_result is changed
- - resize_result.ansible_facts.azure_vm.hardware_profile.vm_size == "Standard_A1"
+ - resize_result.ansible_facts.azure_vm.hardware_profile.vm_size == "Standard_A1_v2"
- name: Delete VM
azure_rm_virtualmachine:
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_skus.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_skus.yml
new file mode 100644
index 000000000..f65c15869
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachine/tasks/azure_test_skus.yml
@@ -0,0 +1,136 @@
+- name: Set variables
+ ansible.builtin.include_tasks: setup.yml
+
+- name: Gather Resource Group info
+ azure.azcollection.azure_rm_resourcegroup_info:
+ name: "{{ resource_group }}"
+ register: rg_info
+
+- name: List available VM SKUs
+ azure.azcollection.azure_rm_vmsku_info:
+ location: "{{ rg_info.resourcegroups.0.location }}"
+ resource_type: "virtualMachines"
+ size: "standard_B1"
+ zone: true
+ register: available_skus_result
+
+- name: Create desired capabilities list
+ ansible.builtin.set_fact:
+ desired_capabilities: [
+ {
+ "name": "MaxResourceVolumeMB",
+ "value": "4096"
+ },
+ {
+ "name": "MemoryGB",
+ "value": "2"
+ }]
+
+- name: Filter available SKUs with desired capabilities
+ ansible.builtin.set_fact:
+ skus_result: |
+ {% set skus_result = [] %}
+ {% for item in available_skus_result.available_skus -%}
+ {% set ns = namespace(use_sku=True) %}
+ {% for capability in item.capabilities -%}
+ {% for desired in desired_capabilities -%}
+ {% if capability.name == desired.name and capability.value != desired.value -%}
+ {% set ns.use_sku = False %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {% if ns.use_sku -%}
+ {{ skus_result.append(item.name) }}
+ {%- endif %}
+ {%- endfor %}
+ {{ skus_result }}
+ failed_when: skus_result[0] is not defined
+
+- name: Create VM with first sku in available skus list
+ azure_rm_virtualmachine:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name }}"
+ admin_username: "testuser"
+ ssh_password_enabled: false
+ ssh_public_keys:
+ - path: /home/testuser/.ssh/authorized_keys
+ key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com"
+ vm_size: "{{ skus_result[0] }}"
+ virtual_network: "{{ network_name }}"
+ image:
+ offer: 0001-com-ubuntu-server-focal
+ publisher: Canonical
+ sku: 20_04-lts
+ version: latest
+ register: vm_output
+
+- name: Query auto created security group before deleting
+ azure_rm_securitygroup_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name }}01"
+ register: nsg_result
+
+- name: Assert that security group exists before deleting
+ ansible.builtin.assert:
+ that:
+ - nsg_result.securitygroups | length == 1
+ - nsg_result.securitygroups[0].network_interfaces | length == 1
+
+- name: Delete VM
+ azure_rm_virtualmachine:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name }}"
+ remove_on_absent: all_autocreated
+ state: absent
+
+- name: Query auto created NIC
+ azure_rm_networkinterface_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name }}01"
+ register: nic_result
+
+- name: Query auto created security group
+ azure_rm_securitygroup_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name }}01"
+ register: nsg_result
+
+- name: Query auto created public IP
+ azure_rm_publicipaddress_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name }}01"
+ register: pip_result
+
+- name: Assert that autocreated resources were deleted
+ ansible.builtin.assert:
+ that:
+ # what about the default storage group?
+ - nic_result.networkinterfaces | length == 0
+ - nsg_result.securitygroups | length == 0
+ - pip_result.publicipaddresses | length == 0
+
+- name: Destroy subnet
+ azure_rm_subnet:
+ resource_group: "{{ resource_group }}"
+ virtual_network: "{{ network_name }}"
+ name: "{{ subnet_name }}"
+ state: absent
+
+- name: Destroy virtual network
+ azure_rm_virtualnetwork:
+ resource_group: "{{ resource_group }}"
+ name: "{{ network_name }}"
+ state: absent
+
+- name: Destroy availability set
+ azure_rm_availabilityset:
+ resource_group: "{{ resource_group }}"
+ name: "{{ availability_set }}"
+ state: absent
+
+- name: Destroy storage account
+ azure_rm_storageaccount:
+ resource_group: "{{ resource_group }}"
+ name: "{{ storage_account }}"
+ force_delete_nonempty: true
+ state: absent
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml
index e053cf0b8..b544b4a71 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml
@@ -39,9 +39,12 @@
azure_rm_networkinterface:
resource_group: "{{ resource_group }}"
name: testNIC
+ ip_configurations:
+ - name: default
+ primary: true
+ public_ip_address_name: testPublicIP
virtual_network: testVnet1
subnet: testSubnet
- public_ip_name: testPublicIP
security_group_name: testNetworkSecurityGroup
- name: Create a storage account
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml
index c3c5336ae..dd429406c 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml
@@ -27,7 +27,11 @@
azure_rm_loadbalancer:
resource_group: "{{ resource_group }}"
name: testLB
- public_ip_address_name: testPublicIP
+ frontend_ip_configurations:
+ - name: frontendipconf0
+ public_ip_address: testPublicIP
+ backend_address_pools:
+ - name: backendaddrp0
sku: Standard
- name: Create public IP address 1
@@ -41,7 +45,11 @@
azure_rm_loadbalancer:
resource_group: "{{ resource_group }}"
name: testLB1
- public_ip_address_name: testPublicIP1
+ frontend_ip_configurations:
+ - name: frontendipconf0
+ public_ip_address: testPublicIP1
+ backend_address_pools:
+ - name: backendaddrp1
sku: Standard
- name: Create network security group within same resource group of VMSS.
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/aliases b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/aliases
new file mode 100644
index 000000000..aa77c071a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+shippable/azure/group2
+destructive
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/meta/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/meta/main.yml
new file mode 100644
index 000000000..95e1952f9
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_azure
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/tasks/main.yml
new file mode 100644
index 000000000..9ca17e251
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_virtualnetworkgatewaynatrule/tasks/main.yml
@@ -0,0 +1,131 @@
+- name: Prepare random number
+ ansible.builtin.set_fact:
+ natrulename: "nat{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ vnetname: "vnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ vngname: "vng{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ pubipname: "testPublicIP{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+
+- name: Create virtual network
+ azure_rm_virtualnetwork:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vnetname }}"
+ address_prefixes: "10.0.0.0/16"
+
+- name: Add subnet
+ azure_rm_subnet:
+ resource_group: "{{ resource_group }}"
+ name: GatewaySubnet
+ address_prefix: "10.0.2.0/24"
+ virtual_network: "{{ vnetname }}"
+
+- name: Create public IP address
+ azure_rm_publicipaddress:
+ resource_group: "{{ resource_group }}"
+ allocation_method: Dynamic
+ name: "{{ pubipname }}"
+
+- name: Create a virtual network gateway
+ azure_rm_virtualnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vngname }}"
+ sku: VpnGw2
+ vpn_gateway_generation: Generation2
+ ip_configurations:
+ - name: testipconfig
+ private_ip_allocation_method: Dynamic
+ public_ip_address_name: "{{ pubipname }}"
+ virtual_network: "{{ vnetname }}"
+
+- name: Create a virtual network nat rule (check_mode test)
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ type_properties_type: Static
+ mode: EgressSnat
+ internal_mappings:
+ - 10.1.0.0/24
+ external_mappings:
+ - 192.168.1.0/24
+ check_mode: true
+
+- name: Create a virtual network nat rule
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ type_properties_type: Static
+ mode: EgressSnat
+ internal_mappings:
+ - 10.1.0.0/24
+ external_mappings:
+ - 192.168.1.0/24
+ register: output
+
+- name: Assert the virtual network nat rule is well created
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Create a virtual network nat rule (Idempotent test)
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ type_properties_type: Static
+ mode: EgressSnat
+ internal_mappings:
+ - 10.1.0.0/24
+ external_mappings:
+ - 192.168.1.0/24
+ register: output
+
+- name: Assert the virtual network nat rule no changed
+ ansible.builtin.assert:
+ that:
+ - not output.changed
+
+- name: Create a new virtual network nat rule (Update test)
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ type_properties_type: Static
+ mode: EgressSnat
+ internal_mappings:
+ - 10.3.0.0/24
+ external_mappings:
+ - 192.168.2.0/24
+ register: output
+
+- name: Assert the virtual network nat rule is well Updated
+ ansible.builtin.assert:
+ that:
+ - output.changed
+
+- name: Get the virtual network nat rule facts
+ azure_rm_virtualnetworkgatewaynatrule_info:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ register: output
+
+- name: Assert the virtual network nat rule facts
+ ansible.builtin.assert:
+ that:
+ - output.state[0].mode == 'EgressSnat'
+ - output.state[0].internal_mappings == ["10.3.0.0/24"]
+ - output.state[0].external_mappings == ["192.168.2.0/24"]
+
+- name: Delete the virtual network nat rule
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ state: absent
+ register: output
+
+- name: Assert the virtual network nat rule deleted
+ ansible.builtin.assert:
+ that:
+ - output.changed
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml
index 6b8128eb3..e766953fc 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webapp/tasks/main.yml
@@ -557,6 +557,74 @@
- facts.webapps[0].ftps_state == 'Disabled'
- not facts.webapps[0].http20_enabled
+- name: Create a windows web app with site_auth_settings
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}-auth"
+ plan:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_plan_name }}-auth"
+ is_linux: false
+ sku: S1
+ site_auth_settings:
+ client_id: "{{ azure_client_id }}"
+ default_provider: 'MicrosoftAccount'
+ runtime_version: '-2'
+ token_refresh_extension_hours: 90
+ unauthenticated_client_action: 'RedirectToLoginPage'
+ client_secret: "{{ azure_secret }}"
+ token_store_enabled: true
+ is_auth_from_file: false
+ enabled: false
+ register: output
+
+- name: Assert the web app is well created
+ ansible.builtin.assert:
+ that:
+ output.changed
+
+- name: Update the web app with site_auth_settings
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}-auth"
+ plan:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_plan_name }}-auth"
+ is_linux: false
+ sku: S1
+ site_auth_settings:
+ client_id: "{{ azure_client_id }}"
+ default_provider: 'MicrosoftAccount'
+ runtime_version: '-3'
+ token_refresh_extension_hours: 100
+ unauthenticated_client_action: 'RedirectToLoginPage'
+ client_secret: "{{ azure_secret }}"
+ token_store_enabled: false
+ is_auth_from_file: false
+ enabled: true
+ register: output
+
+- name: Assert the web app is well updated
+ ansible.builtin.assert:
+ that:
+ output.changed
+
+- name: Get the web app facts
+ azure_rm_webapp_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}-auth"
+ register: output
+
+- name: Assert the web app facts
+ ansible.builtin.assert:
+ that:
+ - output.webapps[0].site_auth_settings.enabled is true
+ - output.webapps[0].site_auth_settings.is_auth_from_file == 'false'
+ - output.webapps[0].site_auth_settings.runtime_version == '-3'
+ - output.webapps[0].site_auth_settings.token_refresh_extension_hours == 100.0
+ - output.webapps[0].site_auth_settings.token_store_enabled is false
+ - output.webapps[0].site_auth_settings.unauthenticated_client_action == 'RedirectToLoginPage'
+
- name: Create a webapp slot (Check mode)
azure_rm_webappslot:
resource_group: "{{ resource_group }}"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml
index 9850dd148..a9496c992 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/azure_rm_webappaccessrestriction/tasks/main.yml
@@ -3,6 +3,20 @@
rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
run_once: true
+- name: Create virtual network
+ azure_rm_virtualnetwork:
+ resource_group: "{{ resource_group }}"
+ name: "vnet-{{ rpfx }}"
+ address_prefixes: 10.42.0.0/24
+
+- name: Create subnet
+ azure_rm_subnet:
+ resource_group: "{{ resource_group }}"
+ name: "subnet-{{ rpfx }}"
+ address_prefix: 10.42.0.0/28
+ virtual_network: "vnet-{{ rpfx }}"
+ register: subnet_output
+
- name: Create a web app
azure_rm_webapp:
resource_group: "{{ resource_group }}"
@@ -26,9 +40,16 @@
action: "Allow"
ip_address: "2.2.2.2/24"
priority: 2
+ - name: "Datacenter 3"
+ action: Allow
+ priority: 3
+ description: "fred test 03"
+ tag: XffProxy
+ vnet_subnet_resource_id: "{{ subnet_output.state.id }}"
scm_ip_security_restrictions_use_main: true
register: output
check_mode: true
+
- name: Assert the resource is well created
ansible.builtin.assert:
that: output.changed
@@ -46,17 +67,26 @@
action: "Allow"
ip_address: "2.2.2.2/24"
priority: 2
+ - name: "Datacenter 3"
+ action: Allow
+ priority: 3
+ description: "fred test 03"
+ tag: XffProxy
+ vnet_subnet_resource_id: "{{ subnet_output.state.id }}"
scm_ip_security_restrictions_use_main: true
register: output
- name: Assert the resource is well created
ansible.builtin.assert:
that:
- output.changed
- - output.ip_security_restrictions | length == 2
+ - output.ip_security_restrictions | length == 3
- output.ip_security_restrictions[0].action == 'Allow'
- output.ip_security_restrictions[0].ip_address == '1.1.1.1/24'
- output.ip_security_restrictions[1].action == 'Allow'
- output.ip_security_restrictions[1].ip_address == '2.2.2.2/24'
+ - output.ip_security_restrictions[2].priority == 3
+ - output.ip_security_restrictions[2].tag == "XffProxy"
+ - output.ip_security_restrictions[2].vnet_subnet_resource_id == subnet_output.state.id
- output.scm_ip_security_restrictions_use_main == true
- name: "Check webapp access restriction facts 1"
@@ -68,11 +98,14 @@
ansible.builtin.assert:
that:
- not output.changed
- - output.ip_security_restrictions | length == 2
+ - output.ip_security_restrictions | length == 3
- output.ip_security_restrictions[0].action == 'Allow'
- output.ip_security_restrictions[0].ip_address == '1.1.1.1/24'
- output.ip_security_restrictions[1].action == 'Allow'
- output.ip_security_restrictions[1].ip_address == '2.2.2.2/24'
+ - output.ip_security_restrictions[2].priority == 3
+ - output.ip_security_restrictions[2].tag == "XffProxy"
+ - output.ip_security_restrictions[2].vnet_subnet_resource_id == subnet_output.state.id
- output.scm_ip_security_restrictions_use_main == true
- name: "Create webapp access restriction - idempotent"
@@ -88,8 +121,15 @@
action: "Allow"
ip_address: "2.2.2.2/24"
priority: 2
+ - name: "Datacenter 3"
+ action: Allow
+ priority: 3
+ description: "fred test 03"
+ tag: XffProxy
+ vnet_subnet_resource_id: "{{ subnet_output.state.id }}"
scm_ip_security_restrictions_use_main: true
register: output
+
- name: Assert the resource is not changed
ansible.builtin.assert:
that: not output.changed
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml
index f162e9134..6e932d54b 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/setup.yml
@@ -47,3 +47,24 @@
sku: 20_04-lts
version: latest
register: vm_output
+
+ - name: Create minimal VM 2 with defaults
+ azure_rm_virtualmachine:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name_2 }}"
+ admin_username: testuser
+ ssh_password_enabled: false
+ ssh_public_keys:
+ - path: /home/testuser/.ssh/authorized_keys
+ key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfoYlIV4lTPZTv7hXaVwQQuqBgGs4yeNRX0SPo2+HQt9u4X7IGwrtXc0nEUm6LfaCikMH58bOL8f20NTGz285kxdFHZRcBXtqmnMz2rXwhK9gwq5h1khc+GzHtdcJXsGA4y0xuaNcidcg04jxAlN/06fwb/VYwwWTVbypNC0gpGEpWckCNm8vlDlA55sU5et0SZ+J0RKVvEaweUOeNbFZqckGPA384imfeYlADppK/7eAxqfBVadVvZG8IJk4yvATgaIENIFj2cXxqu2mQ/Bp5Wr45uApvJsFXmi+v/nkiOEV1QpLOnEwAZo6EfFS4CCQtsymxJCl1PxdJ5LD4ZOtP xiuxi.sun@qq.com"
+ vm_size: Standard_B1ms
+ virtual_network: "{{ network_name }}"
+ image:
+ offer: 0001-com-ubuntu-server-focal
+ publisher: Canonical
+ sku: 20_04-lts
+ version: latest
+ tags:
+ Deployment-Method: Ansible
+ Automation-Method: Ansible
+ register: vm_output_2
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml
index 2a131033a..51690d199 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/teardown.yml
@@ -15,6 +15,13 @@
remove_on_absent: all_autocreated
state: absent
+ - name: Delete VM 2
+ azure_rm_virtualmachine:
+ resource_group: "{{ resource_group }}"
+ name: "{{ vm_name_2 }}"
+ remove_on_absent: all_autocreated
+ state: absent
+
- name: Destroy subnet
azure_rm_subnet:
resource_group: "{{ resource_group }}"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory_filter.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory_filter.yml
new file mode 100644
index 000000000..b3e715506
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/test_inventory_filter.yml
@@ -0,0 +1,21 @@
+---
+- name: Config hosts
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set facts
+ ansible.builtin.include_vars: vars.yml
+
+ - name: Refresh inventory
+ ansible.builtin.meta: refresh_inventory
+
+ - name: Test vm_name_2 in Inventory
+ ansible.builtin.assert:
+ that:
+ - vm_name_2 in hostvars
+
+ - name: Test vm_name not in Inventory
+ ansible.builtin.assert:
+ that:
+ - vm_name not in hostvars
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml
index dc6bbe080..87fa5608f 100644
--- a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/playbooks/vars.yml
@@ -5,6 +5,7 @@ uid_short: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(10
storage_account: "{{ 'stor' ~ uid }}"
availability_set: "{{ 'avbs' ~ uid_short }}"
vm_name: "{{ 'vm' ~ uid_short }}"
+vm_name_2: "{{ 'vm2' ~ uid_short }}"
network_name: "{{ 'vnet' ~ uid_short }}"
subnet_name: "{{ 'snet' ~ uid_short }}"
security_group: "{{ 'sg' ~ uid_short }}"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh
index 6f381c0a5..30c86d9ca 100755
--- a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/runme.sh
@@ -19,6 +19,11 @@ ansible-playbook playbooks/empty_inventory_config.yml "$@"
ansible-playbook playbooks/create_inventory_config.yml "$@" --extra-vars "template=basic2.yml"
ansible-playbook playbooks/test_inventory.yml "$@"
+# using host filters
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml "$@" --extra-vars "template=filter.yml"
+ansible-playbook playbooks/test_inventory_filter.yml "$@"
+
# teardown
ansible-playbook playbooks/teardown.yml "$@"
diff --git a/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/filter.yml b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/filter.yml
new file mode 100644
index 000000000..a928de0da
--- /dev/null
+++ b/ansible_collections/azure/azcollection/tests/integration/targets/inventory_azure/templates/filter.yml
@@ -0,0 +1,14 @@
+---
+plugin: azure.azcollection.azure_rm
+conditional_groups:
+ azure: true
+exclude_host_filters:
+ - location not in ['eastus', 'northcentralus']
+ - powerstate != 'running'
+ - not (tags['Deployment-Method'] | default('Exclude') == 'Ansible' and tags['Automation-Method'] | default('Exclude') == 'Ansible')
+  # Customer tried to use the following filter but dashes in variable names are not allowed.
+ # Workaround was to use the dictionary access method above with defaults.
+ #- not (tags.Deployment-Method == 'Ansible' and tags.Automation-Method == 'Ansible')
+# fail_on_template_errors should be enabled for debugging and possibly all times.
+fail_on_template_errors: True
+plain_host_names: true
diff --git a/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh b/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh
index 4bae8b5d4..2258a81f2 100755
--- a/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh
+++ b/ansible_collections/azure/azcollection/tests/utils/ado/ado.sh
@@ -64,8 +64,8 @@ cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
cd "${TEST_DIR}"
mkdir -p shippable/testresults
-pip install -I -r "${TEST_DIR}/requirements-azure.txt"
-pip install -I -r "${TEST_DIR}/sanity-requirements-azure.txt"
+pip install -I -r "${TEST_DIR}/requirements.txt"
+pip install -I -r "${TEST_DIR}/sanity-requirements.txt"
pip install ansible-lint